1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6
7 #if V8_TARGET_ARCH_MIPS64
8
9 #include "src/base/division-by-constant.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/debug/debug.h"
13 #include "src/mips64/macro-assembler-mips64.h"
14 #include "src/register-configuration.h"
15 #include "src/runtime/runtime.h"
16
17 namespace v8 {
18 namespace internal {
19
20 // Floating point constants.
21 const uint64_t kDoubleSignMask = Double::kSignMask;
22 const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
23 const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
24 const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
25
26 const uint32_t kSingleSignMask = kBinary32SignMask;
27 const uint32_t kSingleExponentMask = kBinary32ExponentMask;
28 const uint32_t kSingleExponentShift = kBinary32ExponentShift;
29 const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
30 const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
31
32 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
33 CodeObjectRequired create_code_object)
34 : Assembler(arg_isolate, buffer, size),
35 generating_stub_(false),
36 has_frame_(false),
37 has_double_zero_reg_set_(false) {
38 if (create_code_object == CodeObjectRequired::kYes) {
39 code_object_ =
40 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
41 }
42 }
43
44 void MacroAssembler::Load(Register dst,
45 const MemOperand& src,
46 Representation r) {
47 DCHECK(!r.IsDouble());
48 if (r.IsInteger8()) {
49 lb(dst, src);
50 } else if (r.IsUInteger8()) {
51 lbu(dst, src);
52 } else if (r.IsInteger16()) {
53 lh(dst, src);
54 } else if (r.IsUInteger16()) {
55 lhu(dst, src);
56 } else if (r.IsInteger32()) {
57 lw(dst, src);
58 } else {
59 ld(dst, src);
60 }
61 }
62
63
64 void MacroAssembler::Store(Register src,
65 const MemOperand& dst,
66 Representation r) {
67 DCHECK(!r.IsDouble());
68 if (r.IsInteger8() || r.IsUInteger8()) {
69 sb(src, dst);
70 } else if (r.IsInteger16() || r.IsUInteger16()) {
71 sh(src, dst);
72 } else if (r.IsInteger32()) {
73 sw(src, dst);
74 } else {
75 if (r.IsHeapObject()) {
76 AssertNotSmi(src);
77 } else if (r.IsSmi()) {
78 AssertSmi(src);
79 }
80 sd(src, dst);
81 }
82 }
83
84
85 void MacroAssembler::LoadRoot(Register destination,
86 Heap::RootListIndex index) {
87 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
88 }
89
90
91 void MacroAssembler::LoadRoot(Register destination,
92 Heap::RootListIndex index,
93 Condition cond,
94 Register src1, const Operand& src2) {
95 Branch(2, NegateCondition(cond), src1, src2);
96 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
97 }
98
99
100 void MacroAssembler::StoreRoot(Register source,
101 Heap::RootListIndex index) {
102 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
103 sd(source, MemOperand(s6, index << kPointerSizeLog2));
104 }
105
106
107 void MacroAssembler::StoreRoot(Register source,
108 Heap::RootListIndex index,
109 Condition cond,
110 Register src1, const Operand& src2) {
111 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
112 Branch(2, NegateCondition(cond), src1, src2);
113 sd(source, MemOperand(s6, index << kPointerSizeLog2));
114 }
115
116 void MacroAssembler::PushCommonFrame(Register marker_reg) {
117 if (marker_reg.is_valid()) {
118 Push(ra, fp, marker_reg);
119 Daddu(fp, sp, Operand(kPointerSize));
120 } else {
121 Push(ra, fp);
122 mov(fp, sp);
123 }
124 }
125
126 void MacroAssembler::PopCommonFrame(Register marker_reg) {
127 if (marker_reg.is_valid()) {
128 Pop(ra, fp, marker_reg);
129 } else {
130 Pop(ra, fp);
131 }
132 }
133
134 void MacroAssembler::PushStandardFrame(Register function_reg) {
135 int offset = -StandardFrameConstants::kContextOffset;
136 if (function_reg.is_valid()) {
137 Push(ra, fp, cp, function_reg);
138 offset += kPointerSize;
139 } else {
140 Push(ra, fp, cp);
141 }
142 Daddu(fp, sp, Operand(offset));
143 }
144
145 // Push and pop all registers that can hold pointers.
146 void MacroAssembler::PushSafepointRegisters() {
147 // Safepoints expect a block of kNumSafepointRegisters values on the
148 // stack, so adjust the stack for unsaved registers.
149 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
150 DCHECK(num_unsaved >= 0);
151 if (num_unsaved > 0) {
152 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
153 }
154 MultiPush(kSafepointSavedRegisters);
155 }
156
157
158 void MacroAssembler::PopSafepointRegisters() {
159 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
160 MultiPop(kSafepointSavedRegisters);
161 if (num_unsaved > 0) {
162 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
163 }
164 }
165
166
167 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
168 sd(src, SafepointRegisterSlot(dst));
169 }
170
171
172 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
173 ld(dst, SafepointRegisterSlot(src));
174 }
175
176
177 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
178 // The registers are pushed starting with the highest encoding,
179 // which means that lowest encodings are closest to the stack pointer.
180 return kSafepointRegisterStackIndexMap[reg_code];
181 }
182
183
184 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
185 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
186 }
187
188
189 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
190 UNIMPLEMENTED_MIPS();
191 // General purpose registers are pushed last on the stack.
192 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
193 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
194 return MemOperand(sp, doubles_size + register_offset);
195 }
196
197
198 void MacroAssembler::InNewSpace(Register object,
199 Register scratch,
200 Condition cc,
201 Label* branch) {
202 DCHECK(cc == eq || cc == ne);
203 const int mask =
204 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
205 CheckPageFlag(object, scratch, mask, cc, branch);
206 }
207
208
209 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
210 // The register 'object' contains a heap object pointer. The heap object
211 // tag is shifted away.
212 void MacroAssembler::RecordWriteField(
213 Register object,
214 int offset,
215 Register value,
216 Register dst,
217 RAStatus ra_status,
218 SaveFPRegsMode save_fp,
219 RememberedSetAction remembered_set_action,
220 SmiCheck smi_check,
221 PointersToHereCheck pointers_to_here_check_for_value) {
222 DCHECK(!AreAliased(value, dst, t8, object));
223 // First, check if a write barrier is even needed. The tests below
224 // catch stores of Smis.
225 Label done;
226
227 // Skip barrier if writing a smi.
228 if (smi_check == INLINE_SMI_CHECK) {
229 JumpIfSmi(value, &done);
230 }
231
232 // Although the object register is tagged, the offset is relative to the start
233   // of the object, so the offset must be a multiple of kPointerSize.
234 DCHECK(IsAligned(offset, kPointerSize));
235
236 Daddu(dst, object, Operand(offset - kHeapObjectTag));
237 if (emit_debug_code()) {
238 Label ok;
239 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
240 Branch(&ok, eq, t8, Operand(zero_reg));
241 stop("Unaligned cell in write barrier");
242 bind(&ok);
243 }
244
245 RecordWrite(object,
246 dst,
247 value,
248 ra_status,
249 save_fp,
250 remembered_set_action,
251 OMIT_SMI_CHECK,
252 pointers_to_here_check_for_value);
253
254 bind(&done);
255
256 // Clobber clobbered input registers when running with the debug-code flag
257 // turned on to provoke errors.
258 if (emit_debug_code()) {
259 li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
260 li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
261 }
262 }
263
264
265 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
266 void MacroAssembler::RecordWriteForMap(Register object,
267 Register map,
268 Register dst,
269 RAStatus ra_status,
270 SaveFPRegsMode fp_mode) {
271 if (emit_debug_code()) {
272 DCHECK(!dst.is(at));
273 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
274 Check(eq,
275 kWrongAddressOrValuePassedToRecordWrite,
276 dst,
277 Operand(isolate()->factory()->meta_map()));
278 }
279
280 if (!FLAG_incremental_marking) {
281 return;
282 }
283
284 if (emit_debug_code()) {
285 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
286 Check(eq,
287 kWrongAddressOrValuePassedToRecordWrite,
288 map,
289 Operand(at));
290 }
291
292 Label done;
293
294 // A single check of the map's pages interesting flag suffices, since it is
295 // only set during incremental collection, and then it's also guaranteed that
296 // the from object's page's interesting flag is also set. This optimization
297 // relies on the fact that maps can never be in new space.
298 CheckPageFlag(map,
299 map, // Used as scratch.
300 MemoryChunk::kPointersToHereAreInterestingMask,
301 eq,
302 &done);
303
304 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
305 if (emit_debug_code()) {
306 Label ok;
307 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
308 Branch(&ok, eq, at, Operand(zero_reg));
309 stop("Unaligned cell in write barrier");
310 bind(&ok);
311 }
312
313 // Record the actual write.
314 if (ra_status == kRAHasNotBeenSaved) {
315 push(ra);
316 }
317 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
318 fp_mode);
319 CallStub(&stub);
320 if (ra_status == kRAHasNotBeenSaved) {
321 pop(ra);
322 }
323
324 bind(&done);
325
326 // Count number of write barriers in generated code.
327 isolate()->counters()->write_barriers_static()->Increment();
328 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
329
330 // Clobber clobbered registers when running with the debug-code flag
331 // turned on to provoke errors.
332 if (emit_debug_code()) {
333 li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
334 li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
335 }
336 }
337
338
339 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
340 // The register 'object' contains a heap object pointer. The heap object
341 // tag is shifted away.
342 void MacroAssembler::RecordWrite(
343 Register object,
344 Register address,
345 Register value,
346 RAStatus ra_status,
347 SaveFPRegsMode fp_mode,
348 RememberedSetAction remembered_set_action,
349 SmiCheck smi_check,
350 PointersToHereCheck pointers_to_here_check_for_value) {
351 DCHECK(!AreAliased(object, address, value, t8));
352 DCHECK(!AreAliased(object, address, value, t9));
353
354 if (emit_debug_code()) {
355 ld(at, MemOperand(address));
356 Assert(
357 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
358 }
359
360 if (remembered_set_action == OMIT_REMEMBERED_SET &&
361 !FLAG_incremental_marking) {
362 return;
363 }
364
365 // First, check if a write barrier is even needed. The tests below
366 // catch stores of smis and stores into the young generation.
367 Label done;
368
369 if (smi_check == INLINE_SMI_CHECK) {
370 DCHECK_EQ(0, kSmiTag);
371 JumpIfSmi(value, &done);
372 }
373
374 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
375 CheckPageFlag(value,
376 value, // Used as scratch.
377 MemoryChunk::kPointersToHereAreInterestingMask,
378 eq,
379 &done);
380 }
381 CheckPageFlag(object,
382 value, // Used as scratch.
383 MemoryChunk::kPointersFromHereAreInterestingMask,
384 eq,
385 &done);
386
387 // Record the actual write.
388 if (ra_status == kRAHasNotBeenSaved) {
389 push(ra);
390 }
391 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
392 fp_mode);
393 CallStub(&stub);
394 if (ra_status == kRAHasNotBeenSaved) {
395 pop(ra);
396 }
397
398 bind(&done);
399
400 // Count number of write barriers in generated code.
401 isolate()->counters()->write_barriers_static()->Increment();
402 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
403 value);
404
405 // Clobber clobbered registers when running with the debug-code flag
406 // turned on to provoke errors.
407 if (emit_debug_code()) {
408 li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
409 li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
410 }
411 }
412
413 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
414 Register code_entry,
415 Register scratch) {
416 const int offset = JSFunction::kCodeEntryOffset;
417
418 // Since a code entry (value) is always in old space, we don't need to update
419 // remembered set. If incremental marking is off, there is nothing for us to
420 // do.
421 if (!FLAG_incremental_marking) return;
422
423 DCHECK(js_function.is(a1));
424 DCHECK(code_entry.is(a4));
425 DCHECK(scratch.is(a5));
426 AssertNotSmi(js_function);
427
428 if (emit_debug_code()) {
429 Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
430 ld(at, MemOperand(scratch));
431 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
432 Operand(code_entry));
433 }
434
435 // First, check if a write barrier is even needed. The tests below
436 // catch stores of Smis and stores into young gen.
437 Label done;
438
439 CheckPageFlag(code_entry, scratch,
440 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
441 CheckPageFlag(js_function, scratch,
442 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
443
444 const Register dst = scratch;
445 Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
446
447 // Save caller-saved registers. js_function and code_entry are in the
448 // caller-saved register list.
449 DCHECK(kJSCallerSaved & js_function.bit());
450 DCHECK(kJSCallerSaved & code_entry.bit());
451 MultiPush(kJSCallerSaved | ra.bit());
452
453 int argument_count = 3;
454
455 PrepareCallCFunction(argument_count, code_entry);
456
457 Move(a0, js_function);
458 Move(a1, dst);
459 li(a2, Operand(ExternalReference::isolate_address(isolate())));
460
461 {
462 AllowExternalCallThatCantCauseGC scope(this);
463 CallCFunction(
464 ExternalReference::incremental_marking_record_write_code_entry_function(
465 isolate()),
466 argument_count);
467 }
468
469 // Restore caller-saved registers.
470 MultiPop(kJSCallerSaved | ra.bit());
471
472 bind(&done);
473 }
474
475 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
476 Register address,
477 Register scratch,
478 SaveFPRegsMode fp_mode,
479 RememberedSetFinalAction and_then) {
480 Label done;
481 if (emit_debug_code()) {
482 Label ok;
483 JumpIfNotInNewSpace(object, scratch, &ok);
484 stop("Remembered set pointer is in new space");
485 bind(&ok);
486 }
487 // Load store buffer top.
488 ExternalReference store_buffer =
489 ExternalReference::store_buffer_top(isolate());
490 li(t8, Operand(store_buffer));
491 ld(scratch, MemOperand(t8));
492 // Store pointer to buffer and increment buffer top.
493 sd(address, MemOperand(scratch));
494 Daddu(scratch, scratch, kPointerSize);
495 // Write back new top of buffer.
496 sd(scratch, MemOperand(t8));
497 // Call stub on end of buffer.
498 // Check for end of buffer.
499 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
500 DCHECK(!scratch.is(t8));
501 if (and_then == kFallThroughAtEnd) {
502 Branch(&done, ne, t8, Operand(zero_reg));
503 } else {
504 DCHECK(and_then == kReturnAtEnd);
505 Ret(ne, t8, Operand(zero_reg));
506 }
507 push(ra);
508 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
509 CallStub(&store_buffer_overflow);
510 pop(ra);
511 bind(&done);
512 if (and_then == kReturnAtEnd) {
513 Ret();
514 }
515 }
516
517
518 // -----------------------------------------------------------------------------
519 // Allocation support.
520
521
522 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
523 Register scratch,
524 Label* miss) {
525 Label same_contexts;
526 Register temporary = t8;
527
528 DCHECK(!holder_reg.is(scratch));
529 DCHECK(!holder_reg.is(at));
530 DCHECK(!scratch.is(at));
531
532 // Load current lexical context from the active StandardFrame, which
533 // may require crawling past STUB frames.
534 Label load_context;
535 Label has_context;
536 mov(at, fp);
537 bind(&load_context);
538 ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
539   // Pass a temporary register; otherwise JumpIfNotSmi would clobber register at.
540 JumpIfNotSmi(scratch, &has_context, temporary);
541 ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
542 Branch(&load_context);
543 bind(&has_context);
544
545 // In debug mode, make sure the lexical context is set.
546 #ifdef DEBUG
547 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
548 scratch, Operand(zero_reg));
549 #endif
550
551 // Load the native context of the current context.
552 ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
553
554 // Check the context is a native context.
555 if (emit_debug_code()) {
556 push(holder_reg); // Temporarily save holder on the stack.
557 // Read the first word and compare to the native_context_map.
558 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
559 LoadRoot(at, Heap::kNativeContextMapRootIndex);
560 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
561 holder_reg, Operand(at));
562 pop(holder_reg); // Restore holder.
563 }
564
565 // Check if both contexts are the same.
566 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
567 Branch(&same_contexts, eq, scratch, Operand(at));
568
569 // Check the context is a native context.
570 if (emit_debug_code()) {
571 push(holder_reg); // Temporarily save holder on the stack.
572 mov(holder_reg, at); // Move at to its holding place.
573 LoadRoot(at, Heap::kNullValueRootIndex);
574 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
575 holder_reg, Operand(at));
576
577 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
578 LoadRoot(at, Heap::kNativeContextMapRootIndex);
579 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
580 holder_reg, Operand(at));
581     // Restoring at is not needed; at is reloaded below.
582 pop(holder_reg); // Restore holder.
583 // Restore at to holder's context.
584 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
585 }
586
587 // Check that the security token in the calling global object is
588 // compatible with the security token in the receiving global
589 // object.
590 int token_offset = Context::kHeaderSize +
591 Context::SECURITY_TOKEN_INDEX * kPointerSize;
592
593 ld(scratch, FieldMemOperand(scratch, token_offset));
594 ld(at, FieldMemOperand(at, token_offset));
595 Branch(miss, ne, scratch, Operand(at));
596
597 bind(&same_contexts);
598 }
599
600
601 // Compute the hash code from the untagged key. This must be kept in sync with
602 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
603 // code-stubs-hydrogen.cc
604 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
605 // First of all we assign the hash seed to scratch.
606 LoadRoot(scratch, Heap::kHashSeedRootIndex);
607 SmiUntag(scratch);
608
609 // Xor original key with a seed.
610 xor_(reg0, reg0, scratch);
611
612 // Compute the hash code from the untagged key. This must be kept in sync
613 // with ComputeIntegerHash in utils.h.
614 //
615 // hash = ~hash + (hash << 15);
616 // The algorithm uses 32-bit integer values.
617 nor(scratch, reg0, zero_reg);
618 Lsa(reg0, scratch, reg0, 15);
619
620 // hash = hash ^ (hash >> 12);
621 srl(at, reg0, 12);
622 xor_(reg0, reg0, at);
623
624 // hash = hash + (hash << 2);
625 Lsa(reg0, reg0, reg0, 2);
626
627 // hash = hash ^ (hash >> 4);
628 srl(at, reg0, 4);
629 xor_(reg0, reg0, at);
630
631 // hash = hash * 2057;
632 sll(scratch, reg0, 11);
633 Lsa(reg0, reg0, reg0, 3);
634 addu(reg0, reg0, scratch);
635
636 // hash = hash ^ (hash >> 16);
637 srl(at, reg0, 16);
638 xor_(reg0, reg0, at);
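  // Clamp the hash to 30 bits so that it stays in sync with
  // ComputeIntegerHash() and always fits in a non-negative Smi.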
639 And(reg0, reg0, Operand(0x3fffffff));
640 }
641
642
643 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
644 Register elements,
645 Register key,
646 Register result,
647 Register reg0,
648 Register reg1,
649 Register reg2) {
650 // Register use:
651 //
652 // elements - holds the slow-case elements of the receiver on entry.
653 // Unchanged unless 'result' is the same register.
654 //
655 // key - holds the smi key on entry.
656 // Unchanged unless 'result' is the same register.
657 //
658 //
659 // result - holds the result on exit if the load succeeded.
660 // Allowed to be the same as 'key' or 'result'.
661 // Unchanged on bailout so 'key' or 'result' can be used
662 // in further computation.
663 //
664 // Scratch registers:
665 //
666 // reg0 - holds the untagged key on entry and holds the hash once computed.
667 //
668 // reg1 - Used to hold the capacity mask of the dictionary.
669 //
670 // reg2 - Used for the index into the dictionary.
671 // at - Temporary (avoid MacroAssembler instructions also using 'at').
672 Label done;
673
674 GetNumberHash(reg0, reg1);
675
676 // Compute the capacity mask.
677 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
678 SmiUntag(reg1, reg1);
679 Dsubu(reg1, reg1, Operand(1));
680
681 // Generate an unrolled loop that performs a few probes before giving up.
682 for (int i = 0; i < kNumberDictionaryProbes; i++) {
683 // Use reg2 for index calculations and keep the hash intact in reg0.
684 mov(reg2, reg0);
685 // Compute the masked index: (hash + i + i * i) & mask.
686 if (i > 0) {
687 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
688 }
689 and_(reg2, reg2, reg1);
690
691 // Scale the index by multiplying by the element size.
692 DCHECK(SeededNumberDictionary::kEntrySize == 3);
693 Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
694
695 // Check if the key is identical to the name.
696 Dlsa(reg2, elements, reg2, kPointerSizeLog2);
697
698 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
699 if (i != kNumberDictionaryProbes - 1) {
700 Branch(&done, eq, key, Operand(at));
701 } else {
702 Branch(miss, ne, key, Operand(at));
703 }
704 }
705
706 bind(&done);
707 // Check that the value is a field property.
708 // reg2: elements + (index * kPointerSize).
709 const int kDetailsOffset =
710 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
711 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
712 DCHECK_EQ(DATA, 0);
713 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
714 Branch(miss, ne, at, Operand(zero_reg));
715
716 // Get the value at the masked, scaled index and return.
717 const int kValueOffset =
718 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
719 ld(result, FieldMemOperand(reg2, kValueOffset));
720 }
721
722
723 // ---------------------------------------------------------------------------
724 // Instruction macros.
725
726 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
727 if (rt.is_reg()) {
728 addu(rd, rs, rt.rm());
729 } else {
730 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
731 addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
732 } else {
733 // li handles the relocation.
734 DCHECK(!rs.is(at));
735 li(at, rt);
736 addu(rd, rs, at);
737 }
738 }
739 }
740
741
742 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
743 if (rt.is_reg()) {
744 daddu(rd, rs, rt.rm());
745 } else {
746 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
747 daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
748 } else {
749 // li handles the relocation.
750 DCHECK(!rs.is(at));
751 li(at, rt);
752 daddu(rd, rs, at);
753 }
754 }
755 }
756
757
758 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
759 if (rt.is_reg()) {
760 subu(rd, rs, rt.rm());
761 } else {
762 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
763 addiu(rd, rs, static_cast<int32_t>(
764 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
765 } else {
766 // li handles the relocation.
767 DCHECK(!rs.is(at));
768 li(at, rt);
769 subu(rd, rs, at);
770 }
771 }
772 }
773
774
775 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
776 if (rt.is_reg()) {
777 dsubu(rd, rs, rt.rm());
778 } else {
779 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
780 daddiu(rd, rs,
781 static_cast<int32_t>(
782 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
783 } else {
784 // li handles the relocation.
785 DCHECK(!rs.is(at));
786 li(at, rt);
787 dsubu(rd, rs, at);
788 }
789 }
790 }
791
792
793 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
794 if (rt.is_reg()) {
795 mul(rd, rs, rt.rm());
796 } else {
797 // li handles the relocation.
798 DCHECK(!rs.is(at));
799 li(at, rt);
800 mul(rd, rs, at);
801 }
802 }
803
804
805 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
806 if (rt.is_reg()) {
807 if (kArchVariant != kMips64r6) {
808 mult(rs, rt.rm());
809 mfhi(rd);
810 } else {
811 muh(rd, rs, rt.rm());
812 }
813 } else {
814 // li handles the relocation.
815 DCHECK(!rs.is(at));
816 li(at, rt);
817 if (kArchVariant != kMips64r6) {
818 mult(rs, at);
819 mfhi(rd);
820 } else {
821 muh(rd, rs, at);
822 }
823 }
824 }
825
826
827 void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
828 if (rt.is_reg()) {
829 if (kArchVariant != kMips64r6) {
830 multu(rs, rt.rm());
831 mfhi(rd);
832 } else {
833 muhu(rd, rs, rt.rm());
834 }
835 } else {
836 // li handles the relocation.
837 DCHECK(!rs.is(at));
838 li(at, rt);
839 if (kArchVariant != kMips64r6) {
840 multu(rs, at);
841 mfhi(rd);
842 } else {
843 muhu(rd, rs, at);
844 }
845 }
846 }
847
848
849 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
850 if (rt.is_reg()) {
851 if (kArchVariant == kMips64r6) {
852 dmul(rd, rs, rt.rm());
853 } else {
854 dmult(rs, rt.rm());
855 mflo(rd);
856 }
857 } else {
858 // li handles the relocation.
859 DCHECK(!rs.is(at));
860 li(at, rt);
861 if (kArchVariant == kMips64r6) {
862 dmul(rd, rs, at);
863 } else {
864 dmult(rs, at);
865 mflo(rd);
866 }
867 }
868 }
869
870
871 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
872 if (rt.is_reg()) {
873 if (kArchVariant == kMips64r6) {
874 dmuh(rd, rs, rt.rm());
875 } else {
876 dmult(rs, rt.rm());
877 mfhi(rd);
878 }
879 } else {
880 // li handles the relocation.
881 DCHECK(!rs.is(at));
882 li(at, rt);
883 if (kArchVariant == kMips64r6) {
884 dmuh(rd, rs, at);
885 } else {
886 dmult(rs, at);
887 mfhi(rd);
888 }
889 }
890 }
891
892
893 void MacroAssembler::Mult(Register rs, const Operand& rt) {
894 if (rt.is_reg()) {
895 mult(rs, rt.rm());
896 } else {
897 // li handles the relocation.
898 DCHECK(!rs.is(at));
899 li(at, rt);
900 mult(rs, at);
901 }
902 }
903
904
905 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
906 if (rt.is_reg()) {
907 dmult(rs, rt.rm());
908 } else {
909 // li handles the relocation.
910 DCHECK(!rs.is(at));
911 li(at, rt);
912 dmult(rs, at);
913 }
914 }
915
916
917 void MacroAssembler::Multu(Register rs, const Operand& rt) {
918 if (rt.is_reg()) {
919 multu(rs, rt.rm());
920 } else {
921 // li handles the relocation.
922 DCHECK(!rs.is(at));
923 li(at, rt);
924 multu(rs, at);
925 }
926 }
927
928
929 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
930 if (rt.is_reg()) {
931 dmultu(rs, rt.rm());
932 } else {
933 // li handles the relocation.
934 DCHECK(!rs.is(at));
935 li(at, rt);
936 dmultu(rs, at);
937 }
938 }
939
940
941 void MacroAssembler::Div(Register rs, const Operand& rt) {
942 if (rt.is_reg()) {
943 div(rs, rt.rm());
944 } else {
945 // li handles the relocation.
946 DCHECK(!rs.is(at));
947 li(at, rt);
948 div(rs, at);
949 }
950 }
951
952
953 void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
954 if (rt.is_reg()) {
955 if (kArchVariant != kMips64r6) {
956 div(rs, rt.rm());
957 mflo(res);
958 } else {
959 div(res, rs, rt.rm());
960 }
961 } else {
962 // li handles the relocation.
963 DCHECK(!rs.is(at));
964 li(at, rt);
965 if (kArchVariant != kMips64r6) {
966 div(rs, at);
967 mflo(res);
968 } else {
969 div(res, rs, at);
970 }
971 }
972 }
973
974
975 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
976 if (rt.is_reg()) {
977 if (kArchVariant != kMips64r6) {
978 div(rs, rt.rm());
979 mfhi(rd);
980 } else {
981 mod(rd, rs, rt.rm());
982 }
983 } else {
984 // li handles the relocation.
985 DCHECK(!rs.is(at));
986 li(at, rt);
987 if (kArchVariant != kMips64r6) {
988 div(rs, at);
989 mfhi(rd);
990 } else {
991 mod(rd, rs, at);
992 }
993 }
994 }
995
996
997 void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
998 if (rt.is_reg()) {
999 if (kArchVariant != kMips64r6) {
1000 divu(rs, rt.rm());
1001 mfhi(rd);
1002 } else {
1003 modu(rd, rs, rt.rm());
1004 }
1005 } else {
1006 // li handles the relocation.
1007 DCHECK(!rs.is(at));
1008 li(at, rt);
1009 if (kArchVariant != kMips64r6) {
1010 divu(rs, at);
1011 mfhi(rd);
1012 } else {
1013 modu(rd, rs, at);
1014 }
1015 }
1016 }
1017
1018
1019 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
1020 if (rt.is_reg()) {
1021 ddiv(rs, rt.rm());
1022 } else {
1023 // li handles the relocation.
1024 DCHECK(!rs.is(at));
1025 li(at, rt);
1026 ddiv(rs, at);
1027 }
1028 }
1029
1030
1031 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
1032 if (kArchVariant != kMips64r6) {
1033 if (rt.is_reg()) {
1034 ddiv(rs, rt.rm());
1035 mflo(rd);
1036 } else {
1037 // li handles the relocation.
1038 DCHECK(!rs.is(at));
1039 li(at, rt);
1040 ddiv(rs, at);
1041 mflo(rd);
1042 }
1043 } else {
1044 if (rt.is_reg()) {
1045 ddiv(rd, rs, rt.rm());
1046 } else {
1047 // li handles the relocation.
1048 DCHECK(!rs.is(at));
1049 li(at, rt);
1050 ddiv(rd, rs, at);
1051 }
1052 }
1053 }
1054
1055
1056 void MacroAssembler::Divu(Register rs, const Operand& rt) {
1057 if (rt.is_reg()) {
1058 divu(rs, rt.rm());
1059 } else {
1060 // li handles the relocation.
1061 DCHECK(!rs.is(at));
1062 li(at, rt);
1063 divu(rs, at);
1064 }
1065 }
1066
1067
1068 void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
1069 if (rt.is_reg()) {
1070 if (kArchVariant != kMips64r6) {
1071 divu(rs, rt.rm());
1072 mflo(res);
1073 } else {
1074 divu(res, rs, rt.rm());
1075 }
1076 } else {
1077 // li handles the relocation.
1078 DCHECK(!rs.is(at));
1079 li(at, rt);
1080 if (kArchVariant != kMips64r6) {
1081 divu(rs, at);
1082 mflo(res);
1083 } else {
1084 divu(res, rs, at);
1085 }
1086 }
1087 }
1088
1089
1090 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
1091 if (rt.is_reg()) {
1092 ddivu(rs, rt.rm());
1093 } else {
1094 // li handles the relocation.
1095 DCHECK(!rs.is(at));
1096 li(at, rt);
1097 ddivu(rs, at);
1098 }
1099 }
1100
1101
1102 void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
1103 if (rt.is_reg()) {
1104 if (kArchVariant != kMips64r6) {
1105 ddivu(rs, rt.rm());
1106 mflo(res);
1107 } else {
1108 ddivu(res, rs, rt.rm());
1109 }
1110 } else {
1111 // li handles the relocation.
1112 DCHECK(!rs.is(at));
1113 li(at, rt);
1114 if (kArchVariant != kMips64r6) {
1115 ddivu(rs, at);
1116 mflo(res);
1117 } else {
1118 ddivu(res, rs, at);
1119 }
1120 }
1121 }
1122
1123
1124 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
1125 if (kArchVariant != kMips64r6) {
1126 if (rt.is_reg()) {
1127 ddiv(rs, rt.rm());
1128 mfhi(rd);
1129 } else {
1130 // li handles the relocation.
1131 DCHECK(!rs.is(at));
1132 li(at, rt);
1133 ddiv(rs, at);
1134 mfhi(rd);
1135 }
1136 } else {
1137 if (rt.is_reg()) {
1138 dmod(rd, rs, rt.rm());
1139 } else {
1140 // li handles the relocation.
1141 DCHECK(!rs.is(at));
1142 li(at, rt);
1143 dmod(rd, rs, at);
1144 }
1145 }
1146 }
1147
1148
1149 void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
1150 if (kArchVariant != kMips64r6) {
1151 if (rt.is_reg()) {
1152 ddivu(rs, rt.rm());
1153 mfhi(rd);
1154 } else {
1155 // li handles the relocation.
1156 DCHECK(!rs.is(at));
1157 li(at, rt);
1158 ddivu(rs, at);
1159 mfhi(rd);
1160 }
1161 } else {
1162 if (rt.is_reg()) {
1163 dmodu(rd, rs, rt.rm());
1164 } else {
1165 // li handles the relocation.
1166 DCHECK(!rs.is(at));
1167 li(at, rt);
1168 dmodu(rd, rs, at);
1169 }
1170 }
1171 }
1172
1173
1174 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1175 if (rt.is_reg()) {
1176 and_(rd, rs, rt.rm());
1177 } else {
1178 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1179 andi(rd, rs, static_cast<int32_t>(rt.imm64_));
1180 } else {
1181 // li handles the relocation.
1182 DCHECK(!rs.is(at));
1183 li(at, rt);
1184 and_(rd, rs, at);
1185 }
1186 }
1187 }
1188
1189
1190 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1191 if (rt.is_reg()) {
1192 or_(rd, rs, rt.rm());
1193 } else {
1194 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1195 ori(rd, rs, static_cast<int32_t>(rt.imm64_));
1196 } else {
1197 // li handles the relocation.
1198 DCHECK(!rs.is(at));
1199 li(at, rt);
1200 or_(rd, rs, at);
1201 }
1202 }
1203 }
1204
1205
1206 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1207 if (rt.is_reg()) {
1208 xor_(rd, rs, rt.rm());
1209 } else {
1210 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1211 xori(rd, rs, static_cast<int32_t>(rt.imm64_));
1212 } else {
1213 // li handles the relocation.
1214 DCHECK(!rs.is(at));
1215 li(at, rt);
1216 xor_(rd, rs, at);
1217 }
1218 }
1219 }
1220
1221
1222 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1223 if (rt.is_reg()) {
1224 nor(rd, rs, rt.rm());
1225 } else {
1226 // li handles the relocation.
1227 DCHECK(!rs.is(at));
1228 li(at, rt);
1229 nor(rd, rs, at);
1230 }
1231 }
1232
1233
1234 void MacroAssembler::Neg(Register rs, const Operand& rt) {
1235 DCHECK(rt.is_reg());
1236 DCHECK(!at.is(rs));
1237 DCHECK(!at.is(rt.rm()));
1238 li(at, -1);
1239 xor_(rs, rt.rm(), at);
1240 }
1241
1242
1243 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1244 if (rt.is_reg()) {
1245 slt(rd, rs, rt.rm());
1246 } else {
1247 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1248 slti(rd, rs, static_cast<int32_t>(rt.imm64_));
1249 } else {
1250 // li handles the relocation.
1251 DCHECK(!rs.is(at));
1252 li(at, rt);
1253 slt(rd, rs, at);
1254 }
1255 }
1256 }
1257
1258
1259 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1260 if (rt.is_reg()) {
1261 sltu(rd, rs, rt.rm());
1262 } else {
1263 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1264 sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
1265 } else {
1266 // li handles the relocation.
1267 DCHECK(!rs.is(at));
1268 li(at, rt);
1269 sltu(rd, rs, at);
1270 }
1271 }
1272 }
1273
1274
1275 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1276 if (rt.is_reg()) {
1277 rotrv(rd, rs, rt.rm());
1278 } else {
1279 int64_t ror_value = rt.imm64_ % 32;
1280 if (ror_value < 0) {
1281 ror_value += 32;
1282 }
1283 rotr(rd, rs, ror_value);
1284 }
1285 }
1286
1287
1288 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1289 if (rt.is_reg()) {
1290 drotrv(rd, rs, rt.rm());
1291 } else {
1292 int64_t dror_value = rt.imm64_ % 64;
1293 if (dror_value < 0) dror_value += 64;
1294 if (dror_value <= 31) {
1295 drotr(rd, rs, dror_value);
1296 } else {
1297 drotr32(rd, rs, dror_value - 32);
1298 }
1299 }
1300 }
1301
1302
1303 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1304 pref(hint, rs);
1305 }
1306
1307
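// Lsa computes rd = rt + (rs << sa) for 1 <= sa <= 31 (Dlsa below is the
// 64-bit counterpart). On r6 with sa <= 4 this maps to a single lsa/dlsa
// instruction; otherwise it expands to a shift into a temporary plus an add.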
1308 void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1309 Register scratch) {
1310 DCHECK(sa >= 1 && sa <= 31);
1311 if (kArchVariant == kMips64r6 && sa <= 4) {
1312 lsa(rd, rt, rs, sa - 1);
1313 } else {
1314 Register tmp = rd.is(rt) ? scratch : rd;
1315 DCHECK(!tmp.is(rt));
1316 sll(tmp, rs, sa);
1317 Addu(rd, rt, tmp);
1318 }
1319 }
1320
1321
1322 void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1323 Register scratch) {
1324 DCHECK(sa >= 1 && sa <= 31);
1325 if (kArchVariant == kMips64r6 && sa <= 4) {
1326 dlsa(rd, rt, rs, sa - 1);
1327 } else {
1328 Register tmp = rd.is(rt) ? scratch : rd;
1329 DCHECK(!tmp.is(rt));
1330 dsll(tmp, rs, sa);
1331 Daddu(rd, rt, tmp);
1332 }
1333 }
1334
1335
1336 // ------------Pseudo-instructions-------------
1337
1338 // Change endianness
1339 void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) {
1340 DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
1341 operand_size == 8);
1342 DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
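  // For operand sizes below 8 bytes the value is first sign-extended to
  // 64 bits; dsbh + dshd then reverse all eight bytes of the register.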
1343 if (operand_size == 1) {
1344 seb(reg, reg);
1345 sll(reg, reg, 0);
1346 dsbh(reg, reg);
1347 dshd(reg, reg);
1348 } else if (operand_size == 2) {
1349 seh(reg, reg);
1350 sll(reg, reg, 0);
1351 dsbh(reg, reg);
1352 dshd(reg, reg);
1353 } else if (operand_size == 4) {
1354 sll(reg, reg, 0);
1355 dsbh(reg, reg);
1356 dshd(reg, reg);
1357 } else {
1358 dsbh(reg, reg);
1359 dshd(reg, reg);
1360 }
1361 }
1362
1363 void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) {
1364 DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
1365 if (operand_size == 1) {
1366 andi(reg, reg, 0xFF);
1367 dsbh(reg, reg);
1368 dshd(reg, reg);
1369 } else if (operand_size == 2) {
1370 andi(reg, reg, 0xFFFF);
1371 dsbh(reg, reg);
1372 dshd(reg, reg);
1373 } else {
1374 dsll32(reg, reg, 0);
1375 dsrl32(reg, reg, 0);
1376 dsbh(reg, reg);
1377 dshd(reg, reg);
1378 }
1379 }
1380
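// Unaligned access helpers: MIPS64r6 handles unaligned loads/stores in
// hardware, so the plain instruction is emitted; on r2 the access is
// assembled from a left/right pair (lwl/lwr, swl/swr, ldl/ldr, sdl/sdr)
// or from individual byte accesses.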
1381 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1382 DCHECK(!rd.is(at));
1383 DCHECK(!rs.rm().is(at));
1384 if (kArchVariant == kMips64r6) {
1385 lw(rd, rs);
1386 } else {
1387 DCHECK(kArchVariant == kMips64r2);
1388 if (is_int16(rs.offset() + kMipsLwrOffset) &&
1389 is_int16(rs.offset() + kMipsLwlOffset)) {
1390 if (!rd.is(rs.rm())) {
1391 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1392 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1393 } else {
1394 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1395 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1396 mov(rd, at);
1397 }
1398 } else { // Offset > 16 bits, use multiple instructions to load.
1399 LoadRegPlusOffsetToAt(rs);
1400 lwr(rd, MemOperand(at, kMipsLwrOffset));
1401 lwl(rd, MemOperand(at, kMipsLwlOffset));
1402 }
1403 }
1404 }
1405
1406 void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
1407 if (kArchVariant == kMips64r6) {
1408 lwu(rd, rs);
1409 } else {
1410 DCHECK(kArchVariant == kMips64r2);
1411 Ulw(rd, rs);
1412 Dext(rd, rd, 0, 32);
1413 }
1414 }
1415
1416
1417 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1418 DCHECK(!rd.is(at));
1419 DCHECK(!rs.rm().is(at));
1420 if (kArchVariant == kMips64r6) {
1421 sw(rd, rs);
1422 } else {
1423 DCHECK(kArchVariant == kMips64r2);
1424 if (is_int16(rs.offset() + kMipsSwrOffset) &&
1425 is_int16(rs.offset() + kMipsSwlOffset)) {
1426 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
1427 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
1428 } else {
1429 LoadRegPlusOffsetToAt(rs);
1430 swr(rd, MemOperand(at, kMipsSwrOffset));
1431 swl(rd, MemOperand(at, kMipsSwlOffset));
1432 }
1433 }
1434 }
1435
1436 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1437 DCHECK(!rd.is(at));
1438 DCHECK(!rs.rm().is(at));
1439 if (kArchVariant == kMips64r6) {
1440 lh(rd, rs);
1441 } else {
1442 DCHECK(kArchVariant == kMips64r2);
1443 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1444 #if defined(V8_TARGET_LITTLE_ENDIAN)
1445 lbu(at, rs);
1446 lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
1447 #elif defined(V8_TARGET_BIG_ENDIAN)
1448 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1449 lb(rd, rs);
1450 #endif
1451 } else { // Offset > 16 bits, use multiple instructions to load.
1452 LoadRegPlusOffsetToAt(rs);
1453 #if defined(V8_TARGET_LITTLE_ENDIAN)
1454 lb(rd, MemOperand(at, 1));
1455 lbu(at, MemOperand(at, 0));
1456 #elif defined(V8_TARGET_BIG_ENDIAN)
1457 lb(rd, MemOperand(at, 0));
1458 lbu(at, MemOperand(at, 1));
1459 #endif
1460 }
1461 dsll(rd, rd, 8);
1462 or_(rd, rd, at);
1463 }
1464 }
1465
1466 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1467 DCHECK(!rd.is(at));
1468 DCHECK(!rs.rm().is(at));
1469 if (kArchVariant == kMips64r6) {
1470 lhu(rd, rs);
1471 } else {
1472 DCHECK(kArchVariant == kMips64r2);
1473 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1474 #if defined(V8_TARGET_LITTLE_ENDIAN)
1475 lbu(at, rs);
1476 lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
1477 #elif defined(V8_TARGET_BIG_ENDIAN)
1478 lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1479 lbu(rd, rs);
1480 #endif
1481 } else { // Offset > 16 bits, use multiple instructions to load.
1482 LoadRegPlusOffsetToAt(rs);
1483 #if defined(V8_TARGET_LITTLE_ENDIAN)
1484 lbu(rd, MemOperand(at, 1));
1485 lbu(at, MemOperand(at, 0));
1486 #elif defined(V8_TARGET_BIG_ENDIAN)
1487 lbu(rd, MemOperand(at, 0));
1488 lbu(at, MemOperand(at, 1));
1489 #endif
1490 }
1491 dsll(rd, rd, 8);
1492 or_(rd, rd, at);
1493 }
1494 }
1495
1496 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1497 DCHECK(!rd.is(at));
1498 DCHECK(!rs.rm().is(at));
1499 DCHECK(!rs.rm().is(scratch));
1500 DCHECK(!scratch.is(at));
1501 if (kArchVariant == kMips64r6) {
1502 sh(rd, rs);
1503 } else {
1504 DCHECK(kArchVariant == kMips64r2);
1505 MemOperand source = rs;
1506 // If offset > 16 bits, load address to at with offset 0.
1507 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
1508 LoadRegPlusOffsetToAt(rs);
1509 source = MemOperand(at, 0);
1510 }
1511
1512 if (!scratch.is(rd)) {
1513 mov(scratch, rd);
1514 }
1515
1516 #if defined(V8_TARGET_LITTLE_ENDIAN)
1517 sb(scratch, source);
1518 srl(scratch, scratch, 8);
1519 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1520 #elif defined(V8_TARGET_BIG_ENDIAN)
1521 sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1522 srl(scratch, scratch, 8);
1523 sb(scratch, source);
1524 #endif
1525 }
1526 }
1527
1528 void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
1529 DCHECK(!rd.is(at));
1530 DCHECK(!rs.rm().is(at));
1531 if (kArchVariant == kMips64r6) {
1532 ld(rd, rs);
1533 } else {
1534 DCHECK(kArchVariant == kMips64r2);
1535 if (is_int16(rs.offset() + kMipsLdrOffset) &&
1536 is_int16(rs.offset() + kMipsLdlOffset)) {
1537 if (!rd.is(rs.rm())) {
1538 ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1539 ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1540 } else {
1541 ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1542 ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1543 mov(rd, at);
1544 }
1545 } else { // Offset > 16 bits, use multiple instructions to load.
1546 LoadRegPlusOffsetToAt(rs);
1547 ldr(rd, MemOperand(at, kMipsLdrOffset));
1548 ldl(rd, MemOperand(at, kMipsLdlOffset));
1549 }
1550 }
1551 }
1552
1553
1554 // Load a consecutive 32-bit word pair into a 64-bit register, putting the
1555 // first word in the low bits and
1556 // the second word in the high bits.
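// For example, if the word at 'rs' is 0x11223344 and the word at 'rs' + 4 is
// 0x55667788, 'rd' ends up holding 0x5566778811223344.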
1557 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1558 Register scratch) {
1559 lwu(rd, rs);
1560 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1561 dsll32(scratch, scratch, 0);
1562 Daddu(rd, rd, scratch);
1563 }
1564
1565 void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
1566 DCHECK(!rd.is(at));
1567 DCHECK(!rs.rm().is(at));
1568 if (kArchVariant == kMips64r6) {
1569 sd(rd, rs);
1570 } else {
1571 DCHECK(kArchVariant == kMips64r2);
1572 if (is_int16(rs.offset() + kMipsSdrOffset) &&
1573 is_int16(rs.offset() + kMipsSdlOffset)) {
1574 sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
1575 sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
1576 } else {
1577 LoadRegPlusOffsetToAt(rs);
1578 sdr(rd, MemOperand(at, kMipsSdrOffset));
1579 sdl(rd, MemOperand(at, kMipsSdlOffset));
1580 }
1581 }
1582 }
1583
1584
1585 // Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
1586 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1587 Register scratch) {
1588 sw(rd, rs);
1589 dsrl32(scratch, rd, 0);
1590 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1591 }
1592
1593 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1594 Register scratch) {
1595 if (kArchVariant == kMips64r6) {
1596 lwc1(fd, rs);
1597 } else {
1598 DCHECK(kArchVariant == kMips64r2);
1599 Ulw(scratch, rs);
1600 mtc1(scratch, fd);
1601 }
1602 }
1603
1604 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1605 Register scratch) {
1606 if (kArchVariant == kMips64r6) {
1607 swc1(fd, rs);
1608 } else {
1609 DCHECK(kArchVariant == kMips64r2);
1610 mfc1(scratch, fd);
1611 Usw(scratch, rs);
1612 }
1613 }
1614
1615 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1616 Register scratch) {
1617 DCHECK(!scratch.is(at));
1618 if (kArchVariant == kMips64r6) {
1619 ldc1(fd, rs);
1620 } else {
1621 DCHECK(kArchVariant == kMips64r2);
1622 Uld(scratch, rs);
1623 dmtc1(scratch, fd);
1624 }
1625 }
1626
1627 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1628 Register scratch) {
1629 DCHECK(!scratch.is(at));
1630 if (kArchVariant == kMips64r6) {
1631 sdc1(fd, rs);
1632 } else {
1633 DCHECK(kArchVariant == kMips64r2);
1634 dmfc1(scratch, fd);
1635 Usd(scratch, rs);
1636 }
1637 }
1638
1639 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1640 AllowDeferredHandleDereference smi_check;
1641 if (value->IsSmi()) {
1642 li(dst, Operand(value), mode);
1643 } else {
1644 DCHECK(value->IsHeapObject());
1645 if (isolate()->heap()->InNewSpace(*value)) {
1646 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1647 li(dst, Operand(cell));
1648 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1649 } else {
1650 li(dst, Operand(value));
1651 }
1652 }
1653 }
1654
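// Helper for li(): drops the low 'bitnum' bits of an immediate and, if their
// top bit is set, increments the remaining upper part by one to compensate
// for the sign extension performed by the previously emitted instruction.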
1655 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
1656 if ((imm >> (bitnum - 1)) & 0x1) {
1657 imm = (imm >> bitnum) + 1;
1658 } else {
1659 imm = imm >> bitnum;
1660 }
1661 return imm;
1662 }
1663
1664 bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1665 bool higher_bits_sign_extended = false;
1666 if (is_int16(j.imm64_)) {
1667 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1668 } else if (!(j.imm64_ & kHiMask)) {
1669 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1670 } else if (!(j.imm64_ & kImm16Mask)) {
1671 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1672 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1673 higher_bits_sign_extended = true;
1674 }
1675 } else {
1676 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1677 ori(rd, rd, (j.imm64_ & kImm16Mask));
1678 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1679 higher_bits_sign_extended = true;
1680 }
1681 }
1682 return higher_bits_sign_extended;
1683 }
1684
1685 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1686 DCHECK(!j.is_reg());
1687 BlockTrampolinePoolScope block_trampoline_pool(this);
1688 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1689 // Normal load of an immediate value which does not need Relocation Info.
1690 if (is_int32(j.imm64_)) {
1691 LiLower32BitHelper(rd, j);
1692 } else {
1693 if (kArchVariant == kMips64r6) {
1694 int64_t imm = j.imm64_;
1695 bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
1696 imm = ShiftAndFixSignExtension(imm, 32);
1697 // If LUI writes 1s to higher bits, we need both DAHI/DATI.
1698 if ((imm & kImm16Mask) ||
1699 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1700 dahi(rd, imm & kImm16Mask);
1701 }
1702 imm = ShiftAndFixSignExtension(imm, 16);
1703 if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
1704 (higher_bits_sign_extended && (j.imm64_ > 0))) {
1705 dati(rd, imm & kImm16Mask);
1706 }
1707 } else {
1708 if (is_int48(j.imm64_)) {
1709 if ((j.imm64_ >> 32) & kImm16Mask) {
1710 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1711 if ((j.imm64_ >> 16) & kImm16Mask) {
1712 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1713 }
1714 } else {
1715 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1716 }
1717 dsll(rd, rd, 16);
1718 if (j.imm64_ & kImm16Mask) {
1719 ori(rd, rd, j.imm64_ & kImm16Mask);
1720 }
1721 } else {
1722 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1723 if ((j.imm64_ >> 32) & kImm16Mask) {
1724 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1725 }
1726 if ((j.imm64_ >> 16) & kImm16Mask) {
1727 dsll(rd, rd, 16);
1728 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1729 if (j.imm64_ & kImm16Mask) {
1730 dsll(rd, rd, 16);
1731 ori(rd, rd, j.imm64_ & kImm16Mask);
1732 } else {
1733 dsll(rd, rd, 16);
1734 }
1735 } else {
1736 if (j.imm64_ & kImm16Mask) {
1737 dsll32(rd, rd, 0);
1738 ori(rd, rd, j.imm64_ & kImm16Mask);
1739 } else {
1740 dsll32(rd, rd, 0);
1741 }
1742 }
1743 }
1744 }
1745 }
1746 } else if (MustUseReg(j.rmode_)) {
1747 RecordRelocInfo(j.rmode_, j.imm64_);
1748 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1749 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1750 dsll(rd, rd, 16);
1751 ori(rd, rd, j.imm64_ & kImm16Mask);
1752 } else if (mode == ADDRESS_LOAD) {
1753 // We always need the same number of instructions as we may need to patch
1754 // this code to load another value which may need all 4 instructions.
1755 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1756 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1757 dsll(rd, rd, 16);
1758 ori(rd, rd, j.imm64_ & kImm16Mask);
1759 } else {
1760 if (kArchVariant == kMips64r6) {
1761 int64_t imm = j.imm64_;
1762 lui(rd, (imm >> kLuiShift) & kImm16Mask);
1763 if (imm & kImm16Mask) {
1764 ori(rd, rd, (imm & kImm16Mask));
1765 }
1766 if ((imm >> 31) & 0x1) {
1767 imm = (imm >> 32) + 1;
1768 } else {
1769 imm = imm >> 32;
1770 }
1771 dahi(rd, imm & kImm16Mask);
1772 if ((imm >> 15) & 0x1) {
1773 imm = (imm >> 16) + 1;
1774 } else {
1775 imm = imm >> 16;
1776 }
1777 dati(rd, imm & kImm16Mask);
1778 } else {
1779 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1780 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1781 dsll(rd, rd, 16);
1782 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1783 dsll(rd, rd, 16);
1784 ori(rd, rd, j.imm64_ & kImm16Mask);
1785 }
1786 }
1787 }
1788
1789
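// Registers are pushed in descending code order, so the register with the
// lowest code ends up closest to the stack pointer (see the comment on
// SafepointRegisterStackIndex above).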
1790 void MacroAssembler::MultiPush(RegList regs) {
1791 int16_t num_to_push = NumberOfBitsSet(regs);
1792 int16_t stack_offset = num_to_push * kPointerSize;
1793
1794 Dsubu(sp, sp, Operand(stack_offset));
1795 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1796 if ((regs & (1 << i)) != 0) {
1797 stack_offset -= kPointerSize;
1798 sd(ToRegister(i), MemOperand(sp, stack_offset));
1799 }
1800 }
1801 }
1802
1803
1804 void MacroAssembler::MultiPushReversed(RegList regs) {
1805 int16_t num_to_push = NumberOfBitsSet(regs);
1806 int16_t stack_offset = num_to_push * kPointerSize;
1807
1808 Dsubu(sp, sp, Operand(stack_offset));
1809 for (int16_t i = 0; i < kNumRegisters; i++) {
1810 if ((regs & (1 << i)) != 0) {
1811 stack_offset -= kPointerSize;
1812 sd(ToRegister(i), MemOperand(sp, stack_offset));
1813 }
1814 }
1815 }
1816
1817
1818 void MacroAssembler::MultiPop(RegList regs) {
1819 int16_t stack_offset = 0;
1820
1821 for (int16_t i = 0; i < kNumRegisters; i++) {
1822 if ((regs & (1 << i)) != 0) {
1823 ld(ToRegister(i), MemOperand(sp, stack_offset));
1824 stack_offset += kPointerSize;
1825 }
1826 }
1827 daddiu(sp, sp, stack_offset);
1828 }
1829
1830
1831 void MacroAssembler::MultiPopReversed(RegList regs) {
1832 int16_t stack_offset = 0;
1833
1834 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1835 if ((regs & (1 << i)) != 0) {
1836 ld(ToRegister(i), MemOperand(sp, stack_offset));
1837 stack_offset += kPointerSize;
1838 }
1839 }
1840 daddiu(sp, sp, stack_offset);
1841 }
1842
1843
1844 void MacroAssembler::MultiPushFPU(RegList regs) {
1845 int16_t num_to_push = NumberOfBitsSet(regs);
1846 int16_t stack_offset = num_to_push * kDoubleSize;
1847
1848 Dsubu(sp, sp, Operand(stack_offset));
1849 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1850 if ((regs & (1 << i)) != 0) {
1851 stack_offset -= kDoubleSize;
1852 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1853 }
1854 }
1855 }
1856
1857
1858 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1859 int16_t num_to_push = NumberOfBitsSet(regs);
1860 int16_t stack_offset = num_to_push * kDoubleSize;
1861
1862 Dsubu(sp, sp, Operand(stack_offset));
1863 for (int16_t i = 0; i < kNumRegisters; i++) {
1864 if ((regs & (1 << i)) != 0) {
1865 stack_offset -= kDoubleSize;
1866 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1867 }
1868 }
1869 }
1870
1871
1872 void MacroAssembler::MultiPopFPU(RegList regs) {
1873 int16_t stack_offset = 0;
1874
1875 for (int16_t i = 0; i < kNumRegisters; i++) {
1876 if ((regs & (1 << i)) != 0) {
1877 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1878 stack_offset += kDoubleSize;
1879 }
1880 }
1881 daddiu(sp, sp, stack_offset);
1882 }
1883
1884
1885 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1886 int16_t stack_offset = 0;
1887
1888 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1889 if ((regs & (1 << i)) != 0) {
1890 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1891 stack_offset += kDoubleSize;
1892 }
1893 }
1894 daddiu(sp, sp, stack_offset);
1895 }
1896
1897
1898 void MacroAssembler::Ext(Register rt,
1899 Register rs,
1900 uint16_t pos,
1901 uint16_t size) {
1902 DCHECK(pos < 32);
1903 DCHECK(pos + size < 33);
1904 ext_(rt, rs, pos, size);
1905 }
1906
1907
1908 void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1909 uint16_t size) {
1910 DCHECK(pos < 32);
1911 DCHECK(pos + size < 33);
1912 dext_(rt, rs, pos, size);
1913 }
1914
1915
1916 void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
1917 uint16_t size) {
1918 DCHECK(pos < 32);
1919 DCHECK(size <= 64);
1920 dextm(rt, rs, pos, size);
1921 }
1922
1923
1924 void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
1925 uint16_t size) {
1926 DCHECK(pos >= 32 && pos < 64);
1927 DCHECK(size < 33);
1928 dextu(rt, rs, pos, size);
1929 }
1930
1931
1932 void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
1933 uint16_t size) {
1934 DCHECK(pos < 32);
1935 DCHECK(pos + size <= 32);
1936 DCHECK(size != 0);
1937 dins_(rt, rs, pos, size);
1938 }
1939
1940
1941 void MacroAssembler::Ins(Register rt,
1942 Register rs,
1943 uint16_t pos,
1944 uint16_t size) {
1945 DCHECK(pos < 32);
1946 DCHECK(pos + size <= 32);
1947 DCHECK(size != 0);
1948 ins_(rt, rs, pos, size);
1949 }
1950
1951
1952 void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
1953 // Move the data from fs to t8.
1954 mfc1(t8, fs);
1955 Cvt_d_uw(fd, t8);
1956 }
1957
1958
1959 void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
1960 // Convert rs to a FP value in fd.
1961 DCHECK(!rs.is(t9));
1962 DCHECK(!rs.is(at));
1963
1964 // Zero extend int32 in rs.
1965 Dext(t9, rs, 0, 32);
1966 dmtc1(t9, fd);
1967 cvt_d_l(fd, fd);
1968 }
1969
1970
1971 void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
1972 // Move the data from fs to t8.
1973 dmfc1(t8, fs);
1974 Cvt_d_ul(fd, t8);
1975 }
1976
1977
1978 void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
1979 // Convert rs to a FP value in fd.
1980
1981 DCHECK(!rs.is(t9));
1982 DCHECK(!rs.is(at));
1983
1984 Label msb_clear, conversion_done;
1985
1986 Branch(&msb_clear, ge, rs, Operand(zero_reg));
1987
1988 // Rs >= 2^63
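  // Halve the value while folding the discarded low bit back in as a sticky
  // bit, convert the now-representable signed value, and double the result.
  // Keeping the low bit preserves correct rounding of the final double.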
1989 andi(t9, rs, 1);
1990 dsrl(rs, rs, 1);
1991 or_(t9, t9, rs);
1992 dmtc1(t9, fd);
1993 cvt_d_l(fd, fd);
1994 Branch(USE_DELAY_SLOT, &conversion_done);
1995 add_d(fd, fd, fd); // In delay slot.
1996
1997 bind(&msb_clear);
1998 // Rs < 2^63, we can do simple conversion.
1999 dmtc1(rs, fd);
2000 cvt_d_l(fd, fd);
2001
2002 bind(&conversion_done);
2003 }
2004
2005 void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
2006 // Move the data from fs to t8.
2007 mfc1(t8, fs);
2008 Cvt_s_uw(fd, t8);
2009 }
2010
2011 void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
2012 // Convert rs to a FP value in fd.
2013 DCHECK(!rs.is(t9));
2014 DCHECK(!rs.is(at));
2015
2016 // Zero extend int32 in rs.
2017 Dext(t9, rs, 0, 32);
2018 dmtc1(t9, fd);
2019 cvt_s_l(fd, fd);
2020 }
2021
2022 void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
2023 // Move the data from fs to t8.
2024 dmfc1(t8, fs);
2025 Cvt_s_ul(fd, t8);
2026 }
2027
2028
2029 void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
2030 // Convert rs to a FP value in fd.
2031
2032 DCHECK(!rs.is(t9));
2033 DCHECK(!rs.is(at));
2034
2035 Label positive, conversion_done;
2036
2037 Branch(&positive, ge, rs, Operand(zero_reg));
2038
2039   // Rs >= 2^63.
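  // Same trick as in Cvt_d_ul above: halve with a sticky low bit, convert as a
  // signed value, then double the result.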
2040 andi(t9, rs, 1);
2041 dsrl(rs, rs, 1);
2042 or_(t9, t9, rs);
2043 dmtc1(t9, fd);
2044 cvt_s_l(fd, fd);
2045 Branch(USE_DELAY_SLOT, &conversion_done);
2046 add_s(fd, fd, fd); // In delay slot.
2047
2048 bind(&positive);
2049   // Rs < 2^63, we can do a simple conversion.
2050 dmtc1(rs, fd);
2051 cvt_s_l(fd, fd);
2052
2053 bind(&conversion_done);
2054 }
2055
2056
2057 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
2058 round_l_d(fd, fs);
2059 }
2060
2061
2062 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
2063 floor_l_d(fd, fs);
2064 }
2065
2066
2067 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
2068 ceil_l_d(fd, fs);
2069 }
2070
2071
2072 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
2073 trunc_l_d(fd, fs);
2074 }
2075
2076
2077 void MacroAssembler::Trunc_l_ud(FPURegister fd,
2078 FPURegister fs,
2079 FPURegister scratch) {
2080 // Load to GPR.
2081 dmfc1(t8, fs);
2082 // Reset sign bit.
2083 li(at, 0x7fffffffffffffff);
2084 and_(t8, t8, at);
2085 dmtc1(t8, fs);
2086 trunc_l_d(fd, fs);
2087 }
2088
2089
2090 void MacroAssembler::Trunc_uw_d(FPURegister fd,
2091 FPURegister fs,
2092 FPURegister scratch) {
2093 Trunc_uw_d(fs, t8, scratch);
2094 mtc1(t8, fd);
2095 }
2096
2097 void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
2098 FPURegister scratch) {
2099 Trunc_uw_s(fs, t8, scratch);
2100 mtc1(t8, fd);
2101 }
2102
2103 void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
2104 FPURegister scratch, Register result) {
2105 Trunc_ul_d(fs, t8, scratch, result);
2106 dmtc1(t8, fd);
2107 }
2108
2109
2110 void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
2111 FPURegister scratch, Register result) {
2112 Trunc_ul_s(fs, t8, scratch, result);
2113 dmtc1(t8, fd);
2114 }
2115
2116
2117 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
2118 trunc_w_d(fd, fs);
2119 }
2120
2121
2122 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
2123 round_w_d(fd, fs);
2124 }
2125
2126
2127 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
2128 floor_w_d(fd, fs);
2129 }
2130
2131
2132 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
2133 ceil_w_d(fd, fs);
2134 }
2135
2136
2137 void MacroAssembler::Trunc_uw_d(FPURegister fd,
2138 Register rs,
2139 FPURegister scratch) {
2140 DCHECK(!fd.is(scratch));
2141 DCHECK(!rs.is(at));
2142
2143   // Load 2^31 into scratch as its double representation.
2144 li(at, 0x41E00000);
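  // 0x41E00000 is the high word of the IEEE-754 double 2^31
  // (biased exponent 0x41E = 1023 + 31, zero mantissa).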
2145 mtc1(zero_reg, scratch);
2146 mthc1(at, scratch);
2147 // Test if scratch > fd.
2148 // If fd < 2^31 we can convert it normally.
2149 Label simple_convert;
2150 BranchF(&simple_convert, NULL, lt, fd, scratch);
2151
2152 // First we subtract 2^31 from fd, then trunc it to rs
2153 // and add 2^31 to rs.
2154 sub_d(scratch, fd, scratch);
2155 trunc_w_d(scratch, scratch);
2156 mfc1(rs, scratch);
2157 Or(rs, rs, 1 << 31);
2158
2159 Label done;
2160 Branch(&done);
2161 // Simple conversion.
2162 bind(&simple_convert);
2163 trunc_w_d(scratch, fd);
2164 mfc1(rs, scratch);
2165
2166 bind(&done);
2167 }
2168
2169 void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
2170 FPURegister scratch) {
2171 DCHECK(!fd.is(scratch));
2172 DCHECK(!rs.is(at));
2173
2174 // Load 2^31 into scratch as its float representation.
2175 li(at, 0x4F000000);
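  // 0x4F000000 is the single-precision representation of 2^31
  // (biased exponent 0x9E = 127 + 31).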
2176 mtc1(at, scratch);
2177 // Test if scratch > fd.
2178 // If fd < 2^31 we can convert it normally.
2179 Label simple_convert;
2180 BranchF32(&simple_convert, NULL, lt, fd, scratch);
2181
2182 // First we subtract 2^31 from fd, then trunc it to rs
2183 // and add 2^31 to rs.
2184 sub_s(scratch, fd, scratch);
2185 trunc_w_s(scratch, scratch);
2186 mfc1(rs, scratch);
2187 Or(rs, rs, 1 << 31);
2188
2189 Label done;
2190 Branch(&done);
2191 // Simple conversion.
2192 bind(&simple_convert);
2193 trunc_w_s(scratch, fd);
2194 mfc1(rs, scratch);
2195
2196 bind(&done);
2197 }
2198
2199 void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
2200 FPURegister scratch, Register result) {
2201 DCHECK(!fd.is(scratch));
2202 DCHECK(!AreAliased(rs, result, at));
2203
2204 Label simple_convert, done, fail;
2205 if (result.is_valid()) {
2206 mov(result, zero_reg);
2207 Move(scratch, -1.0);
2208     // If fd <= -1 or is unordered, the conversion fails.
2209 BranchF(&fail, &fail, le, fd, scratch);
2210 }
2211
2212 // Load 2^63 into scratch as its double representation.
2213 li(at, 0x43e0000000000000);
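  // 0x43E0000000000000 is the IEEE-754 double 2^63
  // (biased exponent 0x43E = 1023 + 63, zero mantissa).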
2214 dmtc1(at, scratch);
2215
2216 // Test if scratch > fd.
2217 // If fd < 2^63 we can convert it normally.
2218 BranchF(&simple_convert, nullptr, lt, fd, scratch);
2219
2220 // First we subtract 2^63 from fd, then trunc it to rs
2221 // and add 2^63 to rs.
2222 sub_d(scratch, fd, scratch);
2223 trunc_l_d(scratch, scratch);
2224 dmfc1(rs, scratch);
2225 Or(rs, rs, Operand(1UL << 63));
2226 Branch(&done);
2227
2228 // Simple conversion.
2229 bind(&simple_convert);
2230 trunc_l_d(scratch, fd);
2231 dmfc1(rs, scratch);
2232
2233 bind(&done);
2234 if (result.is_valid()) {
2235     // The conversion failed if the result is negative.
2236 addiu(at, zero_reg, -1);
2237     dsrl(at, at, 1);  // Load 0x7FFFFFFFFFFFFFFF (2^63 - 1).
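    // XOR-ing with 2^63 - 1 keeps the sign bit and maps 0x7FFFFFFFFFFFFFFF to
    // zero, so the Slt below yields 1 only for a non-negative, non-saturated
    // truncation result.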
2238 dmfc1(result, scratch);
2239 xor_(result, result, at);
2240 Slt(result, zero_reg, result);
2241 }
2242
2243 bind(&fail);
2244 }
2245
2246
2247 void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
2248 FPURegister scratch, Register result) {
2249 DCHECK(!fd.is(scratch));
2250 DCHECK(!AreAliased(rs, result, at));
2251
2252 Label simple_convert, done, fail;
2253 if (result.is_valid()) {
2254 mov(result, zero_reg);
2255 Move(scratch, -1.0f);
2256     // If fd <= -1 or is unordered, the conversion fails.
2257 BranchF32(&fail, &fail, le, fd, scratch);
2258 }
2259
2260 // Load 2^63 into scratch as its float representation.
2261 li(at, 0x5f000000);
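  // 0x5F000000 is the single-precision representation of 2^63
  // (biased exponent 0xBE = 127 + 63).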
2262 mtc1(at, scratch);
2263
2264 // Test if scratch > fd.
2265 // If fd < 2^63 we can convert it normally.
2266 BranchF32(&simple_convert, nullptr, lt, fd, scratch);
2267
2268 // First we subtract 2^63 from fd, then trunc it to rs
2269 // and add 2^63 to rs.
2270 sub_s(scratch, fd, scratch);
2271 trunc_l_s(scratch, scratch);
2272 dmfc1(rs, scratch);
2273 Or(rs, rs, Operand(1UL << 63));
2274 Branch(&done);
2275
2276 // Simple conversion.
2277 bind(&simple_convert);
2278 trunc_l_s(scratch, fd);
2279 dmfc1(rs, scratch);
2280
2281 bind(&done);
2282 if (result.is_valid()) {
2283     // The conversion failed if the result is negative or unordered.
2284 addiu(at, zero_reg, -1);
2285     dsrl(at, at, 1);  // Load 0x7FFFFFFFFFFFFFFF (2^63 - 1).
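    // Same check as in Trunc_ul_d: only a non-negative result different from
    // 0x7FFFFFFFFFFFFFFF counts as a successful conversion.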
2286 dmfc1(result, scratch);
2287 xor_(result, result, at);
2288 Slt(result, zero_reg, result);
2289 }
2290
2291 bind(&fail);
2292 }
2293
2294
2295 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2296 FPURegister ft, FPURegister scratch) {
2297 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
2298 madd_d(fd, fr, fs, ft);
2299 } else {
2300     // Cannot change the source registers' values.
2301 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2302 mul_d(scratch, fs, ft);
2303 add_d(fd, fr, scratch);
2304 }
2305 }
2306
2307
2308 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2309 Label* nan, Condition cond, FPURegister cmp1,
2310 FPURegister cmp2, BranchDelaySlot bd) {
2311 BlockTrampolinePoolScope block_trampoline_pool(this);
2312 if (cond == al) {
2313 Branch(bd, target);
2314 return;
2315 }
2316
2317 if (kArchVariant == kMips64r6) {
2318 sizeField = sizeField == D ? L : W;
2319 }
2320
2321 DCHECK(nan || target);
2322 // Check for unordered (NaN) cases.
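    // The UN compare sets the FP condition when either operand is a NaN; the
    // branch below then transfers to the nan label (or over a long branch to
    // it).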
2323 if (nan) {
2324 bool long_branch = nan->is_bound() ? is_near(nan) : is_trampoline_emitted();
2325 if (kArchVariant != kMips64r6) {
2326 if (long_branch) {
2327 Label skip;
2328 c(UN, sizeField, cmp1, cmp2);
2329 bc1f(&skip);
2330 nop();
2331 BranchLong(nan, bd);
2332 bind(&skip);
2333 } else {
2334 c(UN, sizeField, cmp1, cmp2);
2335 bc1t(nan);
2336 if (bd == PROTECT) {
2337 nop();
2338 }
2339 }
2340 } else {
2341       // Use kDoubleCompareReg for the comparison result. It has to be
2342       // unavailable to the Lithium register allocator.
2344 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2345 if (long_branch) {
2346 Label skip;
2347 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2348 bc1eqz(&skip, kDoubleCompareReg);
2349 nop();
2350 BranchLong(nan, bd);
2351 bind(&skip);
2352 } else {
2353 cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2354 bc1nez(nan, kDoubleCompareReg);
2355 if (bd == PROTECT) {
2356 nop();
2357 }
2358 }
2359 }
2360 }
2361
2362 if (target) {
2363 bool long_branch =
2364 target->is_bound() ? is_near(target) : is_trampoline_emitted();
2365 if (long_branch) {
2366 Label skip;
2367 Condition neg_cond = NegateFpuCondition(cond);
2368 BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2369 BranchLong(target, bd);
2370 bind(&skip);
2371 } else {
2372 BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2373 }
2374 }
2375 }
2376
2377
2378 void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2379 Condition cc, FPURegister cmp1,
2380 FPURegister cmp2, BranchDelaySlot bd) {
2381 if (kArchVariant != kMips64r6) {
2382 BlockTrampolinePoolScope block_trampoline_pool(this);
2383 if (target) {
2384 // Here NaN cases were either handled by this function or are assumed to
2385 // have been handled by the caller.
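      // The FPU compare only provides <, <=, ==, and their unordered variants,
      // so > and >= are obtained by testing the complementary condition and
      // branching on a false result (bc1f).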
2386 switch (cc) {
2387 case lt:
2388 c(OLT, sizeField, cmp1, cmp2);
2389 bc1t(target);
2390 break;
2391 case ult:
2392 c(ULT, sizeField, cmp1, cmp2);
2393 bc1t(target);
2394 break;
2395 case gt:
2396 c(ULE, sizeField, cmp1, cmp2);
2397 bc1f(target);
2398 break;
2399 case ugt:
2400 c(OLE, sizeField, cmp1, cmp2);
2401 bc1f(target);
2402 break;
2403 case ge:
2404 c(ULT, sizeField, cmp1, cmp2);
2405 bc1f(target);
2406 break;
2407 case uge:
2408 c(OLT, sizeField, cmp1, cmp2);
2409 bc1f(target);
2410 break;
2411 case le:
2412 c(OLE, sizeField, cmp1, cmp2);
2413 bc1t(target);
2414 break;
2415 case ule:
2416 c(ULE, sizeField, cmp1, cmp2);
2417 bc1t(target);
2418 break;
2419 case eq:
2420 c(EQ, sizeField, cmp1, cmp2);
2421 bc1t(target);
2422 break;
2423 case ueq:
2424 c(UEQ, sizeField, cmp1, cmp2);
2425 bc1t(target);
2426 break;
2427 case ne: // Unordered or not equal.
2428 c(EQ, sizeField, cmp1, cmp2);
2429 bc1f(target);
2430 break;
2431 case ogl:
2432 c(UEQ, sizeField, cmp1, cmp2);
2433 bc1f(target);
2434 break;
2435 default:
2436 CHECK(0);
2437 }
2438 }
2439 } else {
2440 BlockTrampolinePoolScope block_trampoline_pool(this);
2441 if (target) {
2442 // Here NaN cases were either handled by this function or are assumed to
2443 // have been handled by the caller.
2444       // Unsigned conditions are treated as their signed counterparts.
2445       // Use kDoubleCompareReg for the comparison result; it is valid in fp64
2446       // (FR = 1) mode.
2447 DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2448 switch (cc) {
2449 case lt:
2450 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2451 bc1nez(target, kDoubleCompareReg);
2452 break;
2453 case ult:
2454 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2455 bc1nez(target, kDoubleCompareReg);
2456 break;
2457 case gt:
2458 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2459 bc1eqz(target, kDoubleCompareReg);
2460 break;
2461 case ugt:
2462 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2463 bc1eqz(target, kDoubleCompareReg);
2464 break;
2465 case ge:
2466 cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2467 bc1eqz(target, kDoubleCompareReg);
2468 break;
2469 case uge:
2470 cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2471 bc1eqz(target, kDoubleCompareReg);
2472 break;
2473 case le:
2474 cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2475 bc1nez(target, kDoubleCompareReg);
2476 break;
2477 case ule:
2478 cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2479 bc1nez(target, kDoubleCompareReg);
2480 break;
2481 case eq:
2482 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2483 bc1nez(target, kDoubleCompareReg);
2484 break;
2485 case ueq:
2486 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2487 bc1nez(target, kDoubleCompareReg);
2488 break;
2489 case ne:
2490 cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2491 bc1eqz(target, kDoubleCompareReg);
2492 break;
2493 case ogl:
2494 cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2495 bc1eqz(target, kDoubleCompareReg);
2496 break;
2497 default:
2498 CHECK(0);
2499 }
2500 }
2501 }
2502
2503 if (bd == PROTECT) {
2504 nop();
2505 }
2506 }
2507
2508
2509 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2510 DCHECK(!src_low.is(at));
2511 mfhc1(at, dst);
2512 mtc1(src_low, dst);
2513 mthc1(at, dst);
2514 }
2515
2516
2517 void MacroAssembler::Move(FPURegister dst, float imm) {
2518 li(at, Operand(bit_cast<int32_t>(imm)));
2519 mtc1(at, dst);
2520 }
2521
2522
2523 void MacroAssembler::Move(FPURegister dst, double imm) {
2524 static const DoubleRepresentation minus_zero(-0.0);
2525 static const DoubleRepresentation zero(0.0);
2526 DoubleRepresentation value_rep(imm);
2527 // Handle special values first.
2528 if (value_rep == zero && has_double_zero_reg_set_) {
2529 mov_d(dst, kDoubleRegZero);
2530 } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
2531 neg_d(dst, kDoubleRegZero);
2532 } else {
2533 uint32_t lo, hi;
2534 DoubleAsTwoUInt32(imm, &lo, &hi);
2535 // Move the low part of the double into the lower bits of the corresponding
2536 // FPU register.
2537 if (lo != 0) {
2538 if (!(lo & kImm16Mask)) {
2539 lui(at, (lo >> kLuiShift) & kImm16Mask);
2540 mtc1(at, dst);
2541 } else if (!(lo & kHiMask)) {
2542 ori(at, zero_reg, lo & kImm16Mask);
2543 mtc1(at, dst);
2544 } else {
2545 lui(at, (lo >> kLuiShift) & kImm16Mask);
2546 ori(at, at, lo & kImm16Mask);
2547 mtc1(at, dst);
2548 }
2549 } else {
2550 mtc1(zero_reg, dst);
2551 }
2552 // Move the high part of the double into the high bits of the corresponding
2553 // FPU register.
2554 if (hi != 0) {
2555 if (!(hi & kImm16Mask)) {
2556 lui(at, (hi >> kLuiShift) & kImm16Mask);
2557 mthc1(at, dst);
2558 } else if (!(hi & kHiMask)) {
2559 ori(at, zero_reg, hi & kImm16Mask);
2560 mthc1(at, dst);
2561 } else {
2562 lui(at, (hi >> kLuiShift) & kImm16Mask);
2563 ori(at, at, hi & kImm16Mask);
2564 mthc1(at, dst);
2565 }
2566 } else {
2567 mthc1(zero_reg, dst);
2568 }
2569 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2570 }
2571 }
2572
2573
2574 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2575 if (kArchVariant == kMips64r6) {
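    // r6 removed the movz/movn instructions, so emulate the conditional move
    // with a short branch around the mov.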
2576 Label done;
2577 Branch(&done, ne, rt, Operand(zero_reg));
2578 mov(rd, rs);
2579 bind(&done);
2580 } else {
2581 movz(rd, rs, rt);
2582 }
2583 }
2584
2585
2586 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2587 if (kArchVariant == kMips64r6) {
2588 Label done;
2589 Branch(&done, eq, rt, Operand(zero_reg));
2590 mov(rd, rs);
2591 bind(&done);
2592 } else {
2593 movn(rd, rs, rt);
2594 }
2595 }
2596
2597
2598 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2599 movt(rd, rs, cc);
2600 }
2601
2602
2603 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2604 movf(rd, rs, cc);
2605 }
2606
2607 #define __ masm->
2608
2609 static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2610 FPURegister src1, FPURegister src2, Label* equal) {
2611 if (src1.is(src2)) {
2612 __ Move(dst, src1);
2613 return true;
2614 }
2615
2616 Label other, compare_not_equal;
2617 FPURegister left, right;
2618 if (kind == MaxMinKind::kMin) {
2619 left = src1;
2620 right = src2;
2621 } else {
2622 left = src2;
2623 right = src1;
2624 }
2625
2626 __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
2627 // Left and right hand side are equal, check for -0 vs. +0.
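  // For equal operands only +/-0 matters: min must return -0 if either input
  // is -0, and max must return +0. Testing src1's sign bit selects the correct
  // side given the left/right swap above.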
2628 __ dmfc1(t8, src1);
2629 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2630 __ Move_d(dst, right);
2631 __ Branch(equal);
2632 __ bind(&other);
2633 __ Move_d(dst, left);
2634 __ Branch(equal);
2635 __ bind(&compare_not_equal);
2636 return false;
2637 }
2638
2639 static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
2640 FPURegister src1, FPURegister src2, Label* equal) {
2641 if (src1.is(src2)) {
2642 __ Move(dst, src1);
2643 return true;
2644 }
2645
2646 Label other, compare_not_equal;
2647 FPURegister left, right;
2648 if (kind == MaxMinKind::kMin) {
2649 left = src1;
2650 right = src2;
2651 } else {
2652 left = src2;
2653 right = src1;
2654 }
2655
2656 __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
2657 // Left and right hand side are equal, check for -0 vs. +0.
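  // The single-precision bits are shifted up by 32 so that -0.0f (0x80000000)
  // can be compared against the same 64-bit pattern as in ZeroHelper_d.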
2658 __ FmoveLow(t8, src1);
2659 __ dsll32(t8, t8, 0);
2660 __ Branch(&other, eq, t8, Operand(0x8000000000000000));
2661 __ Move_s(dst, right);
2662 __ Branch(equal);
2663 __ bind(&other);
2664 __ Move_s(dst, left);
2665 __ Branch(equal);
2666 __ bind(&compare_not_equal);
2667 return false;
2668 }
2669
2670 #undef __
2671
2672 void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
2673 FPURegister src2, Label* nan) {
2674 if (nan) {
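    // With a null target BranchF64 only emits the unordered check, so this
    // branches to nan when either input is NaN; the eq condition is unused.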
2675 BranchF64(nullptr, nan, eq, src1, src2);
2676 }
2677 if (kArchVariant >= kMips64r6) {
2678 min_d(dst, src1, src2);
2679 } else {
2680 Label skip;
2681 if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2682 if (dst.is(src1)) {
2683 BranchF64(&skip, nullptr, le, src1, src2);
2684 Move_d(dst, src2);
2685 } else if (dst.is(src2)) {
2686 BranchF64(&skip, nullptr, ge, src1, src2);
2687 Move_d(dst, src1);
2688 } else {
2689 Label right;
2690 BranchF64(&right, nullptr, gt, src1, src2);
2691 Move_d(dst, src1);
2692 Branch(&skip);
2693 bind(&right);
2694 Move_d(dst, src2);
2695 }
2696 }
2697 bind(&skip);
2698 }
2699 }
2700
2701 void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
2702 FPURegister src2, Label* nan) {
2703 if (nan) {
2704 BranchF64(nullptr, nan, eq, src1, src2);
2705 }
2706 if (kArchVariant >= kMips64r6) {
2707 max_d(dst, src1, src2);
2708 } else {
2709 Label skip;
2710 if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2711 if (dst.is(src1)) {
2712 BranchF64(&skip, nullptr, ge, src1, src2);
2713 Move_d(dst, src2);
2714 } else if (dst.is(src2)) {
2715 BranchF64(&skip, nullptr, le, src1, src2);
2716 Move_d(dst, src1);
2717 } else {
2718 Label right;
2719 BranchF64(&right, nullptr, lt, src1, src2);
2720 Move_d(dst, src1);
2721 Branch(&skip);
2722 bind(&right);
2723 Move_d(dst, src2);
2724 }
2725 }
2726 bind(&skip);
2727 }
2728 }
2729
2730 void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
2731 FPURegister src2, Label* nan) {
2732 if (nan) {
2733 BranchF32(nullptr, nan, eq, src1, src2);
2734 }
2735 if (kArchVariant >= kMips64r6) {
2736 min_s(dst, src1, src2);
2737 } else {
2738 Label skip;
2739 if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
2740 if (dst.is(src1)) {
2741 BranchF32(&skip, nullptr, le, src1, src2);
2742 Move_s(dst, src2);
2743 } else if (dst.is(src2)) {
2744 BranchF32(&skip, nullptr, ge, src1, src2);
2745 Move_s(dst, src1);
2746 } else {
2747 Label right;
2748 BranchF32(&right, nullptr, gt, src1, src2);
2749 Move_s(dst, src1);
2750 Branch(&skip);
2751 bind(&right);
2752 Move_s(dst, src2);
2753 }
2754 }
2755 bind(&skip);
2756 }
2757 }
2758
2759 void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
2760 FPURegister src2, Label* nan) {
2761 if (nan) {
2762 BranchF32(nullptr, nan, eq, src1, src2);
2763 }
2764 if (kArchVariant >= kMips64r6) {
2765 max_s(dst, src1, src2);
2766 } else {
2767 Label skip;
2768 if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
2769 if (dst.is(src1)) {
2770 BranchF32(&skip, nullptr, ge, src1, src2);
2771 Move_s(dst, src2);
2772 } else if (dst.is(src2)) {
2773 BranchF32(&skip, nullptr, le, src1, src2);
2774 Move_s(dst, src1);
2775 } else {
2776 Label right;
2777 BranchF32(&right, nullptr, lt, src1, src2);
2778 Move_s(dst, src1);
2779 Branch(&skip);
2780 bind(&right);
2781 Move_s(dst, src2);
2782 }
2783 }
2784 bind(&skip);
2785 }
2786 }
2787
2788 void MacroAssembler::Clz(Register rd, Register rs) {
2789 clz(rd, rs);
2790 }
2791
2792
2793 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2794 Register result,
2795 DoubleRegister double_input,
2796 Register scratch,
2797 DoubleRegister double_scratch,
2798 Register except_flag,
2799 CheckForInexactConversion check_inexact) {
2800 DCHECK(!result.is(scratch));
2801 DCHECK(!double_input.is(double_scratch));
2802 DCHECK(!except_flag.is(scratch));
2803
2804 Label done;
2805
2806 // Clear the except flag (0 = no exception)
2807 mov(except_flag, zero_reg);
2808
2809 // Test for values that can be exactly represented as a signed 32-bit integer.
2810 cvt_w_d(double_scratch, double_input);
2811 mfc1(result, double_scratch);
2812 cvt_d_w(double_scratch, double_scratch);
2813 BranchF(&done, NULL, eq, double_input, double_scratch);
2814
2815 int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
2816
2817 if (check_inexact == kDontCheckForInexactConversion) {
2818 // Ignore inexact exceptions.
2819 except_mask &= ~kFCSRInexactFlagMask;
2820 }
2821
2822 // Save FCSR.
2823 cfc1(scratch, FCSR);
2824 // Disable FPU exceptions.
2825 ctc1(zero_reg, FCSR);
2826
2827 // Do operation based on rounding mode.
2828 switch (rounding_mode) {
2829 case kRoundToNearest:
2830 Round_w_d(double_scratch, double_input);
2831 break;
2832 case kRoundToZero:
2833 Trunc_w_d(double_scratch, double_input);
2834 break;
2835 case kRoundToPlusInf:
2836 Ceil_w_d(double_scratch, double_input);
2837 break;
2838 case kRoundToMinusInf:
2839 Floor_w_d(double_scratch, double_input);
2840 break;
2841 } // End of switch-statement.
2842
2843 // Retrieve FCSR.
2844 cfc1(except_flag, FCSR);
2845 // Restore FCSR.
2846 ctc1(scratch, FCSR);
2847 // Move the converted value into the result register.
2848 mfc1(result, double_scratch);
2849
2850 // Check for fpu exceptions.
2851 And(except_flag, except_flag, Operand(except_mask));
2852
2853 bind(&done);
2854 }
2855
2856
2857 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2858 DoubleRegister double_input,
2859 Label* done) {
2860 DoubleRegister single_scratch = kLithiumScratchDouble.low();
2861 Register scratch = at;
2862 Register scratch2 = t9;
2863
2864 // Clear cumulative exception flags and save the FCSR.
2865 cfc1(scratch2, FCSR);
2866 ctc1(zero_reg, FCSR);
2867 // Try a conversion to a signed integer.
2868 trunc_w_d(single_scratch, double_input);
2869 mfc1(result, single_scratch);
2870 // Retrieve and restore the FCSR.
2871 cfc1(scratch, FCSR);
2872 ctc1(scratch2, FCSR);
2873 // Check for overflow and NaNs.
2874 And(scratch,
2875 scratch,
2876 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2877 // If we had no exceptions we are done.
2878 Branch(done, eq, scratch, Operand(zero_reg));
2879 }
2880
2881
2882 void MacroAssembler::TruncateDoubleToI(Register result,
2883 DoubleRegister double_input) {
2884 Label done;
2885
2886 TryInlineTruncateDoubleToI(result, double_input, &done);
2887
2888   // If we fell through, the inline version didn't succeed - call the stub
2889   // instead.
2889 push(ra);
2890 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
2891 sdc1(double_input, MemOperand(sp, 0));
2892
2893 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2894 CallStub(&stub);
2895
2896 Daddu(sp, sp, Operand(kDoubleSize));
2897 pop(ra);
2898
2899 bind(&done);
2900 }
2901
2902
2903 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2904 Label done;
2905 DoubleRegister double_scratch = f12;
2906 DCHECK(!result.is(object));
2907
2908 ldc1(double_scratch,
2909 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2910 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2911
2912   // If we fell through, the inline version didn't succeed - call the stub
2913   // instead.
2913 push(ra);
2914 DoubleToIStub stub(isolate(),
2915 object,
2916 result,
2917 HeapNumber::kValueOffset - kHeapObjectTag,
2918 true,
2919 true);
2920 CallStub(&stub);
2921 pop(ra);
2922
2923 bind(&done);
2924 }
2925
2926
2927 void MacroAssembler::TruncateNumberToI(Register object,
2928 Register result,
2929 Register heap_number_map,
2930 Register scratch,
2931 Label* not_number) {
2932 Label done;
2933 DCHECK(!result.is(object));
2934
2935 UntagAndJumpIfSmi(result, object, &done);
2936 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2937 TruncateHeapNumberToI(result, object);
2938
2939 bind(&done);
2940 }
2941
2942
2943 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2944 Register src,
2945 int num_least_bits) {
2946 // Ext(dst, src, kSmiTagSize, num_least_bits);
2947 SmiUntag(dst, src);
2948 And(dst, dst, Operand((1 << num_least_bits) - 1));
2949 }
2950
2951
2952 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2953 Register src,
2954 int num_least_bits) {
2955 DCHECK(!src.is(dst));
2956 And(dst, src, Operand((1 << num_least_bits) - 1));
2957 }
2958
2959
2960 // Emulated conditional branches do not emit a nop in the branch delay slot.
2961 //
2962 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2963 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
2964 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
2965 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2966
2967
2968 void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2969 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
2970 BranchShort(offset, bdslot);
2971 }
2972
2973
2974 void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2975 const Operand& rt, BranchDelaySlot bdslot) {
2976 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2977 DCHECK(is_near);
2978 USE(is_near);
2979 }
2980
2981
2982 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2983 if (L->is_bound()) {
2984 if (is_near_branch(L)) {
2985 BranchShort(L, bdslot);
2986 } else {
2987 BranchLong(L, bdslot);
2988 }
2989 } else {
2990 if (is_trampoline_emitted()) {
2991 BranchLong(L, bdslot);
2992 } else {
2993 BranchShort(L, bdslot);
2994 }
2995 }
2996 }
2997
2998
2999 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
3000 const Operand& rt,
3001 BranchDelaySlot bdslot) {
3002 if (L->is_bound()) {
3003 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
3004 if (cond != cc_always) {
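      // The target is out of range for a short branch, so skip over an
      // unconditional long branch using the negated condition.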
3005 Label skip;
3006 Condition neg_cond = NegateCondition(cond);
3007 BranchShort(&skip, neg_cond, rs, rt);
3008 BranchLong(L, bdslot);
3009 bind(&skip);
3010 } else {
3011 BranchLong(L, bdslot);
3012 }
3013 }
3014 } else {
3015 if (is_trampoline_emitted()) {
3016 if (cond != cc_always) {
3017 Label skip;
3018 Condition neg_cond = NegateCondition(cond);
3019 BranchShort(&skip, neg_cond, rs, rt);
3020 BranchLong(L, bdslot);
3021 bind(&skip);
3022 } else {
3023 BranchLong(L, bdslot);
3024 }
3025 } else {
3026 BranchShort(L, cond, rs, rt, bdslot);
3027 }
3028 }
3029 }
3030
3031
3032 void MacroAssembler::Branch(Label* L,
3033 Condition cond,
3034 Register rs,
3035 Heap::RootListIndex index,
3036 BranchDelaySlot bdslot) {
3037 LoadRoot(at, index);
3038 Branch(L, cond, rs, Operand(at), bdslot);
3039 }
3040
3041
3042 void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
3043 BranchDelaySlot bdslot) {
3044 DCHECK(L == nullptr || offset == 0);
3045 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3046 b(offset);
3047
3048 // Emit a nop in the branch delay slot if required.
3049 if (bdslot == PROTECT)
3050 nop();
3051 }
3052
3053
3054 void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
3055 DCHECK(L == nullptr || offset == 0);
3056 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3057 bc(offset);
3058 }
3059
3060
3061 void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
3062 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3063 DCHECK(is_int26(offset));
3064 BranchShortHelperR6(offset, nullptr);
3065 } else {
3066 DCHECK(is_int16(offset));
3067 BranchShortHelper(offset, nullptr, bdslot);
3068 }
3069 }
3070
3071
3072 void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
3073 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3074 BranchShortHelperR6(0, L);
3075 } else {
3076 BranchShortHelper(0, L, bdslot);
3077 }
3078 }
3079
3080
3081 static inline bool IsZero(const Operand& rt) {
3082 if (rt.is_reg()) {
3083 return rt.rm().is(zero_reg);
3084 } else {
3085 return rt.immediate() == 0;
3086 }
3087 }
3088
3089
3090 int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
3091 if (L) {
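    // branch_offset_helper returns a byte offset; shifting right by 2 converts
    // it to the instruction-count offset that branch instructions encode.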
3092 offset = branch_offset_helper(L, bits) >> 2;
3093 } else {
3094 DCHECK(is_intn(offset, bits));
3095 }
3096 return offset;
3097 }
3098
3099
3100 Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
3101 Register scratch) {
3102 Register r2 = no_reg;
3103 if (rt.is_reg()) {
3104 r2 = rt.rm_;
3105 } else {
3106 r2 = scratch;
3107 li(r2, rt);
3108 }
3109
3110 return r2;
3111 }
3112
3113
3114 bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
3115 Condition cond, Register rs,
3116 const Operand& rt) {
3117 DCHECK(L == nullptr || offset == 0);
3118 Register scratch = rs.is(at) ? t8 : at;
3119 OffsetSize bits = OffsetSize::kOffset16;
3120
3121   // Be careful to use shifted_branch_offset only just before the branch
3122   // instruction, as the location will be remembered for patching the
3123   // target.
3124 {
3125 BlockTrampolinePoolScope block_trampoline_pool(this);
3126 switch (cond) {
3127 case cc_always:
3128 bits = OffsetSize::kOffset26;
3129 if (!is_near(L, bits)) return false;
3130 offset = GetOffset(offset, L, bits);
3131 bc(offset);
3132 break;
3133 case eq:
3134 if (rs.code() == rt.rm_.reg_code) {
3135         // Pre R6 beq is used here to make the code patchable. Otherwise bc
3136         // should be used, which has no condition field and so is not patchable.
3137 bits = OffsetSize::kOffset16;
3138 if (!is_near(L, bits)) return false;
3139 scratch = GetRtAsRegisterHelper(rt, scratch);
3140 offset = GetOffset(offset, L, bits);
3141 beq(rs, scratch, offset);
3142 nop();
3143 } else if (IsZero(rt)) {
3144 bits = OffsetSize::kOffset21;
3145 if (!is_near(L, bits)) return false;
3146 offset = GetOffset(offset, L, bits);
3147 beqzc(rs, offset);
3148 } else {
3149 // We don't want any other register but scratch clobbered.
3150 bits = OffsetSize::kOffset16;
3151 if (!is_near(L, bits)) return false;
3152 scratch = GetRtAsRegisterHelper(rt, scratch);
3153 offset = GetOffset(offset, L, bits);
3154 beqc(rs, scratch, offset);
3155 }
3156 break;
3157 case ne:
3158 if (rs.code() == rt.rm_.reg_code) {
3159 // Pre R6 bne is used here to make the code patchable. Otherwise we
3160 // should not generate any instruction.
3161 bits = OffsetSize::kOffset16;
3162 if (!is_near(L, bits)) return false;
3163 scratch = GetRtAsRegisterHelper(rt, scratch);
3164 offset = GetOffset(offset, L, bits);
3165 bne(rs, scratch, offset);
3166 nop();
3167 } else if (IsZero(rt)) {
3168 bits = OffsetSize::kOffset21;
3169 if (!is_near(L, bits)) return false;
3170 offset = GetOffset(offset, L, bits);
3171 bnezc(rs, offset);
3172 } else {
3173 // We don't want any other register but scratch clobbered.
3174 bits = OffsetSize::kOffset16;
3175 if (!is_near(L, bits)) return false;
3176 scratch = GetRtAsRegisterHelper(rt, scratch);
3177 offset = GetOffset(offset, L, bits);
3178 bnec(rs, scratch, offset);
3179 }
3180 break;
3181
3182 // Signed comparison.
3183 case greater:
3184 // rs > rt
3185 if (rs.code() == rt.rm_.reg_code) {
3186 break; // No code needs to be emitted.
3187 } else if (rs.is(zero_reg)) {
3188 bits = OffsetSize::kOffset16;
3189 if (!is_near(L, bits)) return false;
3190 scratch = GetRtAsRegisterHelper(rt, scratch);
3191 offset = GetOffset(offset, L, bits);
3192 bltzc(scratch, offset);
3193 } else if (IsZero(rt)) {
3194 bits = OffsetSize::kOffset16;
3195 if (!is_near(L, bits)) return false;
3196 offset = GetOffset(offset, L, bits);
3197 bgtzc(rs, offset);
3198 } else {
3199 bits = OffsetSize::kOffset16;
3200 if (!is_near(L, bits)) return false;
3201 scratch = GetRtAsRegisterHelper(rt, scratch);
3202 DCHECK(!rs.is(scratch));
3203 offset = GetOffset(offset, L, bits);
3204 bltc(scratch, rs, offset);
3205 }
3206 break;
3207 case greater_equal:
3208 // rs >= rt
3209 if (rs.code() == rt.rm_.reg_code) {
3210 bits = OffsetSize::kOffset26;
3211 if (!is_near(L, bits)) return false;
3212 offset = GetOffset(offset, L, bits);
3213 bc(offset);
3214 } else if (rs.is(zero_reg)) {
3215 bits = OffsetSize::kOffset16;
3216 if (!is_near(L, bits)) return false;
3217 scratch = GetRtAsRegisterHelper(rt, scratch);
3218 offset = GetOffset(offset, L, bits);
3219 blezc(scratch, offset);
3220 } else if (IsZero(rt)) {
3221 bits = OffsetSize::kOffset16;
3222 if (!is_near(L, bits)) return false;
3223 offset = GetOffset(offset, L, bits);
3224 bgezc(rs, offset);
3225 } else {
3226 bits = OffsetSize::kOffset16;
3227 if (!is_near(L, bits)) return false;
3228 scratch = GetRtAsRegisterHelper(rt, scratch);
3229 DCHECK(!rs.is(scratch));
3230 offset = GetOffset(offset, L, bits);
3231 bgec(rs, scratch, offset);
3232 }
3233 break;
3234 case less:
3235 // rs < rt
3236 if (rs.code() == rt.rm_.reg_code) {
3237 break; // No code needs to be emitted.
3238 } else if (rs.is(zero_reg)) {
3239 bits = OffsetSize::kOffset16;
3240 if (!is_near(L, bits)) return false;
3241 scratch = GetRtAsRegisterHelper(rt, scratch);
3242 offset = GetOffset(offset, L, bits);
3243 bgtzc(scratch, offset);
3244 } else if (IsZero(rt)) {
3245 bits = OffsetSize::kOffset16;
3246 if (!is_near(L, bits)) return false;
3247 offset = GetOffset(offset, L, bits);
3248 bltzc(rs, offset);
3249 } else {
3250 bits = OffsetSize::kOffset16;
3251 if (!is_near(L, bits)) return false;
3252 scratch = GetRtAsRegisterHelper(rt, scratch);
3253 DCHECK(!rs.is(scratch));
3254 offset = GetOffset(offset, L, bits);
3255 bltc(rs, scratch, offset);
3256 }
3257 break;
3258 case less_equal:
3259 // rs <= rt
3260 if (rs.code() == rt.rm_.reg_code) {
3261 bits = OffsetSize::kOffset26;
3262 if (!is_near(L, bits)) return false;
3263 offset = GetOffset(offset, L, bits);
3264 bc(offset);
3265 } else if (rs.is(zero_reg)) {
3266 bits = OffsetSize::kOffset16;
3267 if (!is_near(L, bits)) return false;
3268 scratch = GetRtAsRegisterHelper(rt, scratch);
3269 offset = GetOffset(offset, L, bits);
3270 bgezc(scratch, offset);
3271 } else if (IsZero(rt)) {
3272 bits = OffsetSize::kOffset16;
3273 if (!is_near(L, bits)) return false;
3274 offset = GetOffset(offset, L, bits);
3275 blezc(rs, offset);
3276 } else {
3277 bits = OffsetSize::kOffset16;
3278 if (!is_near(L, bits)) return false;
3279 scratch = GetRtAsRegisterHelper(rt, scratch);
3280 DCHECK(!rs.is(scratch));
3281 offset = GetOffset(offset, L, bits);
3282 bgec(scratch, rs, offset);
3283 }
3284 break;
3285
3286 // Unsigned comparison.
3287 case Ugreater:
3288 // rs > rt
3289 if (rs.code() == rt.rm_.reg_code) {
3290 break; // No code needs to be emitted.
3291 } else if (rs.is(zero_reg)) {
3292 bits = OffsetSize::kOffset21;
3293 if (!is_near(L, bits)) return false;
3294 scratch = GetRtAsRegisterHelper(rt, scratch);
3295 offset = GetOffset(offset, L, bits);
3296 bnezc(scratch, offset);
3297 } else if (IsZero(rt)) {
3298 bits = OffsetSize::kOffset21;
3299 if (!is_near(L, bits)) return false;
3300 offset = GetOffset(offset, L, bits);
3301 bnezc(rs, offset);
3302 } else {
3303 bits = OffsetSize::kOffset16;
3304 if (!is_near(L, bits)) return false;
3305 scratch = GetRtAsRegisterHelper(rt, scratch);
3306 DCHECK(!rs.is(scratch));
3307 offset = GetOffset(offset, L, bits);
3308 bltuc(scratch, rs, offset);
3309 }
3310 break;
3311 case Ugreater_equal:
3312 // rs >= rt
3313 if (rs.code() == rt.rm_.reg_code) {
3314 bits = OffsetSize::kOffset26;
3315 if (!is_near(L, bits)) return false;
3316 offset = GetOffset(offset, L, bits);
3317 bc(offset);
3318 } else if (rs.is(zero_reg)) {
3319 bits = OffsetSize::kOffset21;
3320 if (!is_near(L, bits)) return false;
3321 scratch = GetRtAsRegisterHelper(rt, scratch);
3322 offset = GetOffset(offset, L, bits);
3323 beqzc(scratch, offset);
3324 } else if (IsZero(rt)) {
3325 bits = OffsetSize::kOffset26;
3326 if (!is_near(L, bits)) return false;
3327 offset = GetOffset(offset, L, bits);
3328 bc(offset);
3329 } else {
3330 bits = OffsetSize::kOffset16;
3331 if (!is_near(L, bits)) return false;
3332 scratch = GetRtAsRegisterHelper(rt, scratch);
3333 DCHECK(!rs.is(scratch));
3334 offset = GetOffset(offset, L, bits);
3335 bgeuc(rs, scratch, offset);
3336 }
3337 break;
3338 case Uless:
3339 // rs < rt
3340 if (rs.code() == rt.rm_.reg_code) {
3341 break; // No code needs to be emitted.
3342 } else if (rs.is(zero_reg)) {
3343 bits = OffsetSize::kOffset21;
3344 if (!is_near(L, bits)) return false;
3345 scratch = GetRtAsRegisterHelper(rt, scratch);
3346 offset = GetOffset(offset, L, bits);
3347 bnezc(scratch, offset);
3348 } else if (IsZero(rt)) {
3349 break; // No code needs to be emitted.
3350 } else {
3351 bits = OffsetSize::kOffset16;
3352 if (!is_near(L, bits)) return false;
3353 scratch = GetRtAsRegisterHelper(rt, scratch);
3354 DCHECK(!rs.is(scratch));
3355 offset = GetOffset(offset, L, bits);
3356 bltuc(rs, scratch, offset);
3357 }
3358 break;
3359 case Uless_equal:
3360 // rs <= rt
3361 if (rs.code() == rt.rm_.reg_code) {
3362 bits = OffsetSize::kOffset26;
3363 if (!is_near(L, bits)) return false;
3364 offset = GetOffset(offset, L, bits);
3365 bc(offset);
3366 } else if (rs.is(zero_reg)) {
3367 bits = OffsetSize::kOffset26;
3368 if (!is_near(L, bits)) return false;
3369 scratch = GetRtAsRegisterHelper(rt, scratch);
3370 offset = GetOffset(offset, L, bits);
3371 bc(offset);
3372 } else if (IsZero(rt)) {
3373 bits = OffsetSize::kOffset21;
3374 if (!is_near(L, bits)) return false;
3375 offset = GetOffset(offset, L, bits);
3376 beqzc(rs, offset);
3377 } else {
3378 bits = OffsetSize::kOffset16;
3379 if (!is_near(L, bits)) return false;
3380 scratch = GetRtAsRegisterHelper(rt, scratch);
3381 DCHECK(!rs.is(scratch));
3382 offset = GetOffset(offset, L, bits);
3383 bgeuc(scratch, rs, offset);
3384 }
3385 break;
3386 default:
3387 UNREACHABLE();
3388 }
3389 }
3390 CheckTrampolinePoolQuick(1);
3391 return true;
3392 }
3393
3394
3395 bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3396 Register rs, const Operand& rt,
3397 BranchDelaySlot bdslot) {
3398 DCHECK(L == nullptr || offset == 0);
3399 if (!is_near(L, OffsetSize::kOffset16)) return false;
3400
3401 Register scratch = at;
3402 int32_t offset32;
3403
3404   // Be careful to use shifted_branch_offset only just before the branch
3405   // instruction, as the location will be remembered for patching the
3406   // target.
3407 {
3408 BlockTrampolinePoolScope block_trampoline_pool(this);
3409 switch (cond) {
3410 case cc_always:
3411 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3412 b(offset32);
3413 break;
3414 case eq:
3415 if (IsZero(rt)) {
3416 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3417 beq(rs, zero_reg, offset32);
3418 } else {
3419 // We don't want any other register but scratch clobbered.
3420 scratch = GetRtAsRegisterHelper(rt, scratch);
3421 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3422 beq(rs, scratch, offset32);
3423 }
3424 break;
3425 case ne:
3426 if (IsZero(rt)) {
3427 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3428 bne(rs, zero_reg, offset32);
3429 } else {
3430 // We don't want any other register but scratch clobbered.
3431 scratch = GetRtAsRegisterHelper(rt, scratch);
3432 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3433 bne(rs, scratch, offset32);
3434 }
3435 break;
3436
3437 // Signed comparison.
3438 case greater:
3439 if (IsZero(rt)) {
3440 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3441 bgtz(rs, offset32);
3442 } else {
3443 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3444 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3445 bne(scratch, zero_reg, offset32);
3446 }
3447 break;
3448 case greater_equal:
3449 if (IsZero(rt)) {
3450 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3451 bgez(rs, offset32);
3452 } else {
3453 Slt(scratch, rs, rt);
3454 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3455 beq(scratch, zero_reg, offset32);
3456 }
3457 break;
3458 case less:
3459 if (IsZero(rt)) {
3460 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3461 bltz(rs, offset32);
3462 } else {
3463 Slt(scratch, rs, rt);
3464 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3465 bne(scratch, zero_reg, offset32);
3466 }
3467 break;
3468 case less_equal:
3469 if (IsZero(rt)) {
3470 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3471 blez(rs, offset32);
3472 } else {
3473 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3474 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3475 beq(scratch, zero_reg, offset32);
3476 }
3477 break;
3478
3479 // Unsigned comparison.
3480 case Ugreater:
3481 if (IsZero(rt)) {
3482 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3483 bne(rs, zero_reg, offset32);
3484 } else {
3485 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3486 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3487 bne(scratch, zero_reg, offset32);
3488 }
3489 break;
3490 case Ugreater_equal:
3491 if (IsZero(rt)) {
3492 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3493 b(offset32);
3494 } else {
3495 Sltu(scratch, rs, rt);
3496 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3497 beq(scratch, zero_reg, offset32);
3498 }
3499 break;
3500 case Uless:
3501 if (IsZero(rt)) {
3502 return true; // No code needs to be emitted.
3503 } else {
3504 Sltu(scratch, rs, rt);
3505 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3506 bne(scratch, zero_reg, offset32);
3507 }
3508 break;
3509 case Uless_equal:
3510 if (IsZero(rt)) {
3511 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3512 beq(rs, zero_reg, offset32);
3513 } else {
3514 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3515 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3516 beq(scratch, zero_reg, offset32);
3517 }
3518 break;
3519 default:
3520 UNREACHABLE();
3521 }
3522 }
3523
3524 // Emit a nop in the branch delay slot if required.
3525 if (bdslot == PROTECT)
3526 nop();
3527
3528 return true;
3529 }
3530
3531
3532 bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3533 Register rs, const Operand& rt,
3534 BranchDelaySlot bdslot) {
3535 BRANCH_ARGS_CHECK(cond, rs, rt);
3536
3537 if (!L) {
3538 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3539 DCHECK(is_int26(offset));
3540 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3541 } else {
3542 DCHECK(is_int16(offset));
3543 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3544 }
3545 } else {
3546 DCHECK(offset == 0);
3547 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3548 return BranchShortHelperR6(0, L, cond, rs, rt);
3549 } else {
3550 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3551 }
3552 }
3553 return false;
3554 }
3555
3556
3557 void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3558 const Operand& rt, BranchDelaySlot bdslot) {
3559 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3560 }
3561
3562
3563 void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3564 const Operand& rt, BranchDelaySlot bdslot) {
3565 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3566 }
3567
3568
3569 void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3570 BranchAndLinkShort(offset, bdslot);
3571 }
3572
3573
3574 void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3575 const Operand& rt, BranchDelaySlot bdslot) {
3576 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3577 DCHECK(is_near);
3578 USE(is_near);
3579 }
3580
3581
3582 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3583 if (L->is_bound()) {
3584 if (is_near_branch(L)) {
3585 BranchAndLinkShort(L, bdslot);
3586 } else {
3587 BranchAndLinkLong(L, bdslot);
3588 }
3589 } else {
3590 if (is_trampoline_emitted()) {
3591 BranchAndLinkLong(L, bdslot);
3592 } else {
3593 BranchAndLinkShort(L, bdslot);
3594 }
3595 }
3596 }
3597
3598
3599 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3600 const Operand& rt,
3601 BranchDelaySlot bdslot) {
3602 if (L->is_bound()) {
3603 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3604 Label skip;
3605 Condition neg_cond = NegateCondition(cond);
3606 BranchShort(&skip, neg_cond, rs, rt);
3607 BranchAndLinkLong(L, bdslot);
3608 bind(&skip);
3609 }
3610 } else {
3611 if (is_trampoline_emitted()) {
3612 Label skip;
3613 Condition neg_cond = NegateCondition(cond);
3614 BranchShort(&skip, neg_cond, rs, rt);
3615 BranchAndLinkLong(L, bdslot);
3616 bind(&skip);
3617 } else {
3618 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3619 }
3620 }
3621 }
3622
3623
3624 void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3625 BranchDelaySlot bdslot) {
3626 DCHECK(L == nullptr || offset == 0);
3627 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3628 bal(offset);
3629
3630 // Emit a nop in the branch delay slot if required.
3631 if (bdslot == PROTECT)
3632 nop();
3633 }
3634
3635
3636 void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3637 DCHECK(L == nullptr || offset == 0);
3638 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3639 balc(offset);
3640 }
3641
3642
3643 void MacroAssembler::BranchAndLinkShort(int32_t offset,
3644 BranchDelaySlot bdslot) {
3645 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3646 DCHECK(is_int26(offset));
3647 BranchAndLinkShortHelperR6(offset, nullptr);
3648 } else {
3649 DCHECK(is_int16(offset));
3650 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3651 }
3652 }
3653
3654
3655 void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3656 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3657 BranchAndLinkShortHelperR6(0, L);
3658 } else {
3659 BranchAndLinkShortHelper(0, L, bdslot);
3660 }
3661 }
3662
3663
3664 bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3665 Condition cond, Register rs,
3666 const Operand& rt) {
3667 DCHECK(L == nullptr || offset == 0);
3668 Register scratch = rs.is(at) ? t8 : at;
3669 OffsetSize bits = OffsetSize::kOffset16;
3670
3671 BlockTrampolinePoolScope block_trampoline_pool(this);
3672 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3673 switch (cond) {
3674 case cc_always:
3675 bits = OffsetSize::kOffset26;
3676 if (!is_near(L, bits)) return false;
3677 offset = GetOffset(offset, L, bits);
3678 balc(offset);
3679 break;
3680 case eq:
3681 if (!is_near(L, bits)) return false;
3682 Subu(scratch, rs, rt);
3683 offset = GetOffset(offset, L, bits);
3684 beqzalc(scratch, offset);
3685 break;
3686 case ne:
3687 if (!is_near(L, bits)) return false;
3688 Subu(scratch, rs, rt);
3689 offset = GetOffset(offset, L, bits);
3690 bnezalc(scratch, offset);
3691 break;
3692
3693 // Signed comparison.
3694 case greater:
3695 // rs > rt
3696 if (rs.code() == rt.rm_.reg_code) {
3697 break; // No code needs to be emitted.
3698 } else if (rs.is(zero_reg)) {
3699 if (!is_near(L, bits)) return false;
3700 scratch = GetRtAsRegisterHelper(rt, scratch);
3701 offset = GetOffset(offset, L, bits);
3702 bltzalc(scratch, offset);
3703 } else if (IsZero(rt)) {
3704 if (!is_near(L, bits)) return false;
3705 offset = GetOffset(offset, L, bits);
3706 bgtzalc(rs, offset);
3707 } else {
3708 if (!is_near(L, bits)) return false;
3709 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3710 offset = GetOffset(offset, L, bits);
3711 bnezalc(scratch, offset);
3712 }
3713 break;
3714 case greater_equal:
3715 // rs >= rt
3716 if (rs.code() == rt.rm_.reg_code) {
3717 bits = OffsetSize::kOffset26;
3718 if (!is_near(L, bits)) return false;
3719 offset = GetOffset(offset, L, bits);
3720 balc(offset);
3721 } else if (rs.is(zero_reg)) {
3722 if (!is_near(L, bits)) return false;
3723 scratch = GetRtAsRegisterHelper(rt, scratch);
3724 offset = GetOffset(offset, L, bits);
3725 blezalc(scratch, offset);
3726 } else if (IsZero(rt)) {
3727 if (!is_near(L, bits)) return false;
3728 offset = GetOffset(offset, L, bits);
3729 bgezalc(rs, offset);
3730 } else {
3731 if (!is_near(L, bits)) return false;
3732 Slt(scratch, rs, rt);
3733 offset = GetOffset(offset, L, bits);
3734 beqzalc(scratch, offset);
3735 }
3736 break;
3737 case less:
3738 // rs < rt
3739 if (rs.code() == rt.rm_.reg_code) {
3740 break; // No code needs to be emitted.
3741 } else if (rs.is(zero_reg)) {
3742 if (!is_near(L, bits)) return false;
3743 scratch = GetRtAsRegisterHelper(rt, scratch);
3744 offset = GetOffset(offset, L, bits);
3745 bgtzalc(scratch, offset);
3746 } else if (IsZero(rt)) {
3747 if (!is_near(L, bits)) return false;
3748 offset = GetOffset(offset, L, bits);
3749 bltzalc(rs, offset);
3750 } else {
3751 if (!is_near(L, bits)) return false;
3752 Slt(scratch, rs, rt);
3753 offset = GetOffset(offset, L, bits);
3754 bnezalc(scratch, offset);
3755 }
3756 break;
3757 case less_equal:
3758 // rs <= rt
3759 if (rs.code() == rt.rm_.reg_code) {
3760 bits = OffsetSize::kOffset26;
3761 if (!is_near(L, bits)) return false;
3762 offset = GetOffset(offset, L, bits);
3763 balc(offset);
3764 } else if (rs.is(zero_reg)) {
3765 if (!is_near(L, bits)) return false;
3766 scratch = GetRtAsRegisterHelper(rt, scratch);
3767 offset = GetOffset(offset, L, bits);
3768 bgezalc(scratch, offset);
3769 } else if (IsZero(rt)) {
3770 if (!is_near(L, bits)) return false;
3771 offset = GetOffset(offset, L, bits);
3772 blezalc(rs, offset);
3773 } else {
3774 if (!is_near(L, bits)) return false;
3775 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3776 offset = GetOffset(offset, L, bits);
3777 beqzalc(scratch, offset);
3778 }
3779 break;
3780
3781
3782 // Unsigned comparison.
3783 case Ugreater:
3784 // rs > rt
3785 if (!is_near(L, bits)) return false;
3786 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3787 offset = GetOffset(offset, L, bits);
3788 bnezalc(scratch, offset);
3789 break;
3790 case Ugreater_equal:
3791 // rs >= rt
3792 if (!is_near(L, bits)) return false;
3793 Sltu(scratch, rs, rt);
3794 offset = GetOffset(offset, L, bits);
3795 beqzalc(scratch, offset);
3796 break;
3797 case Uless:
3798 // rs < rt
3799 if (!is_near(L, bits)) return false;
3800 Sltu(scratch, rs, rt);
3801 offset = GetOffset(offset, L, bits);
3802 bnezalc(scratch, offset);
3803 break;
3804 case Uless_equal:
3805 // rs <= rt
3806 if (!is_near(L, bits)) return false;
3807 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3808 offset = GetOffset(offset, L, bits);
3809 beqzalc(scratch, offset);
3810 break;
3811 default:
3812 UNREACHABLE();
3813 }
3814 return true;
3815 }
3816
3817
3818 // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3819 // with the slt instructions. We could use sub or add instead but we would miss
3820 // overflow cases, so we keep slt and add an intermediate third instruction.
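// The slt/sltu result (0 or 1) is decremented to 0 or -1 so that bgezal/bltzal
// can test its sign while still writing the return address to ra.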
3821 bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3822 Condition cond, Register rs,
3823 const Operand& rt,
3824 BranchDelaySlot bdslot) {
3825 DCHECK(L == nullptr || offset == 0);
3826 if (!is_near(L, OffsetSize::kOffset16)) return false;
3827
3828 Register scratch = t8;
3829 BlockTrampolinePoolScope block_trampoline_pool(this);
3830
3831 switch (cond) {
3832 case cc_always:
3833 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3834 bal(offset);
3835 break;
3836 case eq:
3837 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3838 nop();
3839 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3840 bal(offset);
3841 break;
3842 case ne:
3843 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3844 nop();
3845 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3846 bal(offset);
3847 break;
3848
3849 // Signed comparison.
3850 case greater:
3851 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3852 addiu(scratch, scratch, -1);
3853 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3854 bgezal(scratch, offset);
3855 break;
3856 case greater_equal:
3857 Slt(scratch, rs, rt);
3858 addiu(scratch, scratch, -1);
3859 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3860 bltzal(scratch, offset);
3861 break;
3862 case less:
3863 Slt(scratch, rs, rt);
3864 addiu(scratch, scratch, -1);
3865 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3866 bgezal(scratch, offset);
3867 break;
3868 case less_equal:
3869 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3870 addiu(scratch, scratch, -1);
3871 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3872 bltzal(scratch, offset);
3873 break;
3874
3875 // Unsigned comparison.
3876 case Ugreater:
3877 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3878 addiu(scratch, scratch, -1);
3879 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3880 bgezal(scratch, offset);
3881 break;
3882 case Ugreater_equal:
3883 Sltu(scratch, rs, rt);
3884 addiu(scratch, scratch, -1);
3885 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3886 bltzal(scratch, offset);
3887 break;
3888 case Uless:
3889 Sltu(scratch, rs, rt);
3890 addiu(scratch, scratch, -1);
3891 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3892 bgezal(scratch, offset);
3893 break;
3894 case Uless_equal:
3895 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3896 addiu(scratch, scratch, -1);
3897 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3898 bltzal(scratch, offset);
3899 break;
3900
3901 default:
3902 UNREACHABLE();
3903 }
3904
3905 // Emit a nop in the branch delay slot if required.
3906 if (bdslot == PROTECT)
3907 nop();
3908
3909 return true;
3910 }
3911
3912
3913 bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3914 Condition cond, Register rs,
3915 const Operand& rt,
3916 BranchDelaySlot bdslot) {
3917 BRANCH_ARGS_CHECK(cond, rs, rt);
3918
3919 if (!L) {
3920 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3921 DCHECK(is_int26(offset));
3922 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3923 } else {
3924 DCHECK(is_int16(offset));
3925 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3926 }
3927 } else {
3928 DCHECK(offset == 0);
3929 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3930 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3931 } else {
3932 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3933 }
3934 }
3935 return false;
3936 }
3937
3938
3939 void MacroAssembler::Jump(Register target,
3940 Condition cond,
3941 Register rs,
3942 const Operand& rt,
3943 BranchDelaySlot bd) {
3944 BlockTrampolinePoolScope block_trampoline_pool(this);
3945 if (kArchVariant == kMips64r6 && bd == PROTECT) {
3946 if (cond == cc_always) {
3947 jic(target, 0);
3948 } else {
3949 BRANCH_ARGS_CHECK(cond, rs, rt);
3950 Branch(2, NegateCondition(cond), rs, rt);
3951 jic(target, 0);
3952 }
3953 } else {
3954 if (cond == cc_always) {
3955 jr(target);
3956 } else {
3957 BRANCH_ARGS_CHECK(cond, rs, rt);
3958 Branch(2, NegateCondition(cond), rs, rt);
3959 jr(target);
3960 }
3961 // Emit a nop in the branch delay slot if required.
3962 if (bd == PROTECT) nop();
3963 }
3964 }
3965
3966
3967 void MacroAssembler::Jump(intptr_t target,
3968 RelocInfo::Mode rmode,
3969 Condition cond,
3970 Register rs,
3971 const Operand& rt,
3972 BranchDelaySlot bd) {
3973 Label skip;
3974 if (cond != cc_always) {
3975 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3976 }
3977 // The first instruction of 'li' may be placed in the delay slot.
3978 // This is not an issue; t9 is expected to be clobbered anyway.
3979 li(t9, Operand(target, rmode));
3980 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3981 bind(&skip);
3982 }
3983
3984
3985 void MacroAssembler::Jump(Address target,
3986 RelocInfo::Mode rmode,
3987 Condition cond,
3988 Register rs,
3989 const Operand& rt,
3990 BranchDelaySlot bd) {
3991 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3992 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3993 }
3994
3995
3996 void MacroAssembler::Jump(Handle<Code> code,
3997 RelocInfo::Mode rmode,
3998 Condition cond,
3999 Register rs,
4000 const Operand& rt,
4001 BranchDelaySlot bd) {
4002 DCHECK(RelocInfo::IsCodeTarget(rmode));
4003 AllowDeferredHandleDereference embedding_raw_address;
4004 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
4005 }
4006
4007
4008 int MacroAssembler::CallSize(Register target,
4009 Condition cond,
4010 Register rs,
4011 const Operand& rt,
4012 BranchDelaySlot bd) {
4013 int size = 0;
4014
4015 if (cond == cc_always) {
4016 size += 1;
4017 } else {
4018 size += 3;
4019 }
4020
4021 if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
4022
4023 return size * kInstrSize;
4024 }
4025
4026
4027 // Note: To call gcc-compiled C code on mips, you must call through t9.
4028 void MacroAssembler::Call(Register target,
4029 Condition cond,
4030 Register rs,
4031 const Operand& rt,
4032 BranchDelaySlot bd) {
4033 #ifdef DEBUG
4034 int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
4035 #endif
4036
4037 BlockTrampolinePoolScope block_trampoline_pool(this);
4038 Label start;
4039 bind(&start);
4040 if (kArchVariant == kMips64r6 && bd == PROTECT) {
4041 if (cond == cc_always) {
4042 jialc(target, 0);
4043 } else {
4044 BRANCH_ARGS_CHECK(cond, rs, rt);
4045 Branch(2, NegateCondition(cond), rs, rt);
4046 jialc(target, 0);
4047 }
4048 } else {
4049 if (cond == cc_always) {
4050 jalr(target);
4051 } else {
4052 BRANCH_ARGS_CHECK(cond, rs, rt);
4053 Branch(2, NegateCondition(cond), rs, rt);
4054 jalr(target);
4055 }
4056 // Emit a nop in the branch delay slot if required.
4057 if (bd == PROTECT) nop();
4058 }
4059
4060 #ifdef DEBUG
4061 CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
4062 SizeOfCodeGeneratedSince(&start));
4063 #endif
4064 }
4065
4066
4067 int MacroAssembler::CallSize(Address target,
4068 RelocInfo::Mode rmode,
4069 Condition cond,
4070 Register rs,
4071 const Operand& rt,
4072 BranchDelaySlot bd) {
4073 int size = CallSize(t9, cond, rs, rt, bd);
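  // li(t9, ..., ADDRESS_LOAD) for a 64-bit address accounts for the extra
  // four instructions.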
4074 return size + 4 * kInstrSize;
4075 }
4076
4077
4078 void MacroAssembler::Call(Address target,
4079 RelocInfo::Mode rmode,
4080 Condition cond,
4081 Register rs,
4082 const Operand& rt,
4083 BranchDelaySlot bd) {
4084 BlockTrampolinePoolScope block_trampoline_pool(this);
4085 Label start;
4086 bind(&start);
4087 int64_t target_int = reinterpret_cast<int64_t>(target);
4088 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
4089 Call(t9, cond, rs, rt, bd);
4090 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
4091 SizeOfCodeGeneratedSince(&start));
4092 }
4093
4094
4095 int MacroAssembler::CallSize(Handle<Code> code,
4096 RelocInfo::Mode rmode,
4097 TypeFeedbackId ast_id,
4098 Condition cond,
4099 Register rs,
4100 const Operand& rt,
4101 BranchDelaySlot bd) {
4102 AllowDeferredHandleDereference using_raw_address;
4103 return CallSize(reinterpret_cast<Address>(code.location()),
4104 rmode, cond, rs, rt, bd);
4105 }
4106
4107
4108 void MacroAssembler::Call(Handle<Code> code,
4109 RelocInfo::Mode rmode,
4110 TypeFeedbackId ast_id,
4111 Condition cond,
4112 Register rs,
4113 const Operand& rt,
4114 BranchDelaySlot bd) {
4115 BlockTrampolinePoolScope block_trampoline_pool(this);
4116 Label start;
4117 bind(&start);
4118 DCHECK(RelocInfo::IsCodeTarget(rmode));
4119 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
4120 SetRecordedAstId(ast_id);
4121 rmode = RelocInfo::CODE_TARGET_WITH_ID;
4122 }
4123 AllowDeferredHandleDereference embedding_raw_address;
4124 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
4125 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
4126 SizeOfCodeGeneratedSince(&start));
4127 }
4128
4129
4130 void MacroAssembler::Ret(Condition cond,
4131 Register rs,
4132 const Operand& rt,
4133 BranchDelaySlot bd) {
4134 Jump(ra, cond, rs, rt, bd);
4135 }
4136
4137
4138 void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
4139 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4140 (!L->is_bound() || is_near_r6(L))) {
4141 BranchShortHelperR6(0, L);
4142 } else {
4143 EmitForbiddenSlotInstruction();
4144 BlockTrampolinePoolScope block_trampoline_pool(this);
4145 {
4146 BlockGrowBufferScope block_buf_growth(this);
4147 // Buffer growth (and relocation) must be blocked for internal references
4148 // until associated instructions are emitted and available to be patched.
4149 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4150 j(L);
4151 }
4152 // Emit a nop in the branch delay slot if required.
4153 if (bdslot == PROTECT) nop();
4154 }
4155 }
4156
4157
4158 void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4159 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4160 (!L->is_bound() || is_near_r6(L))) {
4161 BranchAndLinkShortHelperR6(0, L);
4162 } else {
4163 EmitForbiddenSlotInstruction();
4164 BlockTrampolinePoolScope block_trampoline_pool(this);
4165 {
4166 BlockGrowBufferScope block_buf_growth(this);
4167 // Buffer growth (and relocation) must be blocked for internal references
4168 // until associated instructions are emitted and available to be patched.
4169 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
4170 jal(L);
4171 }
4172 // Emit a nop in the branch delay slot if required.
4173 if (bdslot == PROTECT) nop();
4174 }
4175 }
4176
4177
4178 void MacroAssembler::DropAndRet(int drop) {
4179 DCHECK(is_int16(drop * kPointerSize));
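  // The stack adjustment below executes in the branch delay slot of the return.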
4180 Ret(USE_DELAY_SLOT);
4181 daddiu(sp, sp, drop * kPointerSize);
4182 }
4183
4184 void MacroAssembler::DropAndRet(int drop,
4185 Condition cond,
4186 Register r1,
4187 const Operand& r2) {
4188 // Both Drop and Ret need to be conditional.
4189 Label skip;
4190 if (cond != cc_always) {
4191 Branch(&skip, NegateCondition(cond), r1, r2);
4192 }
4193
4194 Drop(drop);
4195 Ret();
4196
4197 if (cond != cc_always) {
4198 bind(&skip);
4199 }
4200 }
4201
4202
4203 void MacroAssembler::Drop(int count,
4204 Condition cond,
4205 Register reg,
4206 const Operand& op) {
4207 if (count <= 0) {
4208 return;
4209 }
4210
4211 Label skip;
4212
4213 if (cond != al) {
4214 Branch(&skip, NegateCondition(cond), reg, op);
4215 }
4216
4217 Daddu(sp, sp, Operand(count * kPointerSize));
4218
4219 if (cond != al) {
4220 bind(&skip);
4221 }
4222 }
4223
4224
4225
4226 void MacroAssembler::Swap(Register reg1,
4227 Register reg2,
4228 Register scratch) {
4229 if (scratch.is(no_reg)) {
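  // No scratch register available: swap in place with the three-XOR trick.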
4230 Xor(reg1, reg1, Operand(reg2));
4231 Xor(reg2, reg2, Operand(reg1));
4232 Xor(reg1, reg1, Operand(reg2));
4233 } else {
4234 mov(scratch, reg1);
4235 mov(reg1, reg2);
4236 mov(reg2, scratch);
4237 }
4238 }
4239
4240
4241 void MacroAssembler::Call(Label* target) {
4242 BranchAndLink(target);
4243 }
4244
4245
4246 void MacroAssembler::Push(Handle<Object> handle) {
4247 li(at, Operand(handle));
4248 push(at);
4249 }
4250
4251
4252 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
4253 DCHECK(!src.is(scratch));
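  // Push the high and low 32-bit halves of src as two Smis (payload in the
  // upper 32 bits). src is clobbered.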
4254 mov(scratch, src);
4255 dsrl32(src, src, 0);
4256 dsll32(src, src, 0);
4257 push(src);
4258 dsll32(scratch, scratch, 0);
4259 push(scratch);
4260 }
4261
4262
4263 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
4264 DCHECK(!dst.is(scratch));
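  // Pop the low half (pushed last) and then the high half, and reassemble the
  // original 64-bit value in dst.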
4265 pop(scratch);
4266 dsrl32(scratch, scratch, 0);
4267 pop(dst);
4268 dsrl32(dst, dst, 0);
4269 dsll32(dst, dst, 0);
4270 or_(dst, dst, scratch);
4271 }
4272
4273
4274 void MacroAssembler::DebugBreak() {
4275 PrepareCEntryArgs(0);
4276 PrepareCEntryFunction(
4277 ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
4278 CEntryStub ces(isolate(), 1);
4279 DCHECK(AllowThisStubCall(&ces));
4280 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
4281 }
4282
4283
4284 // ---------------------------------------------------------------------------
4285 // Exception handling.
4286
4287 void MacroAssembler::PushStackHandler() {
4288 // Adjust this code if not the case.
4289 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
4290 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4291
4292 // Link the current handler as the next handler.
4293 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4294 ld(a5, MemOperand(a6));
4295 push(a5);
4296
4297 // Set this new handler as the current one.
4298 sd(sp, MemOperand(a6));
4299 }
4300
4301
4302 void MacroAssembler::PopStackHandler() {
4303 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4304 pop(a1);
4305 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
4306 kPointerSize)));
4307 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4308 sd(a1, MemOperand(at));
4309 }
4310
4311
4312 void MacroAssembler::Allocate(int object_size,
4313 Register result,
4314 Register scratch1,
4315 Register scratch2,
4316 Label* gc_required,
4317 AllocationFlags flags) {
4318 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4319 if (!FLAG_inline_new) {
4320 if (emit_debug_code()) {
4321 // Trash the registers to simulate an allocation failure.
4322 li(result, 0x7091);
4323 li(scratch1, 0x7191);
4324 li(scratch2, 0x7291);
4325 }
4326 jmp(gc_required);
4327 return;
4328 }
4329
4330 DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4331
4332 // Make object size into bytes.
4333 if ((flags & SIZE_IN_WORDS) != 0) {
4334 object_size *= kPointerSize;
4335 }
4336 DCHECK(0 == (object_size & kObjectAlignmentMask));
4337
4338 // Check relative positions of allocation top and limit addresses.
4339 // ARM adds additional checks to make sure the ldm instruction can be
4340 // used. On MIPS we don't have ldm, so no additional checks are needed.
4341 ExternalReference allocation_top =
4342 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4343 ExternalReference allocation_limit =
4344 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4345
4346 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4347 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4348 DCHECK((limit - top) == kPointerSize);
4349
4350 // Set up allocation top address and allocation limit registers.
4351 Register top_address = scratch1;
4352 // This code stores a temporary value in t9.
4353 Register alloc_limit = t9;
4354 Register result_end = scratch2;
4355 li(top_address, Operand(allocation_top));
4356
4357 if ((flags & RESULT_CONTAINS_TOP) == 0) {
4358 // Load allocation top into result and allocation limit into alloc_limit.
4359 ld(result, MemOperand(top_address));
4360 ld(alloc_limit, MemOperand(top_address, kPointerSize));
4361 } else {
4362 if (emit_debug_code()) {
4363 // Assert that result actually contains top on entry.
4364 ld(alloc_limit, MemOperand(top_address));
4365 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4366 }
4367 // Load allocation limit. Result already contains allocation top.
4368 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
4369 }
4370
4371 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4372 // the same alignment on MIPS64.
4373 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4374
4375 if (emit_debug_code()) {
4376 And(at, result, Operand(kDoubleAlignmentMask));
4377 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4378 }
4379
4380 // Calculate new top and bail out if new space is exhausted. Use result
4381 // to calculate the new top.
4382 Daddu(result_end, result, Operand(object_size));
4383 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4384
4385 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4386 // The top pointer is not updated for allocation folding dominators.
4387 sd(result_end, MemOperand(top_address));
4388 }
4389
4390 // Tag object.
4391 Daddu(result, result, Operand(kHeapObjectTag));
4392 }
4393
4394
4395 void MacroAssembler::Allocate(Register object_size, Register result,
4396 Register result_end, Register scratch,
4397 Label* gc_required, AllocationFlags flags) {
4398 if (!FLAG_inline_new) {
4399 if (emit_debug_code()) {
4400 // Trash the registers to simulate an allocation failure.
4401 li(result, 0x7091);
4402 li(scratch, 0x7191);
4403 li(result_end, 0x7291);
4404 }
4405 jmp(gc_required);
4406 return;
4407 }
4408
4409 // |object_size| and |result_end| may overlap, other registers must not.
4410 DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4411 DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4412
4413 // Check relative positions of allocation top and limit addresses.
4414 // ARM adds additional checks to make sure the ldm instruction can be
4415 // used. On MIPS we don't have ldm, so no additional checks are needed.
4416 ExternalReference allocation_top =
4417 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4418 ExternalReference allocation_limit =
4419 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4420 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4421 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4422 DCHECK((limit - top) == kPointerSize);
4423
4424 // Set up allocation top address and object size registers.
4425 Register top_address = scratch;
4426 // This code stores a temporary value in t9.
4427 Register alloc_limit = t9;
4428 li(top_address, Operand(allocation_top));
4429
4430 if ((flags & RESULT_CONTAINS_TOP) == 0) {
4431 // Load allocation top into result and allocation limit into alloc_limit.
4432 ld(result, MemOperand(top_address));
4433 ld(alloc_limit, MemOperand(top_address, kPointerSize));
4434 } else {
4435 if (emit_debug_code()) {
4436 // Assert that result actually contains top on entry.
4437 ld(alloc_limit, MemOperand(top_address));
4438 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4439 }
4440 // Load allocation limit. Result already contains allocation top.
4441 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
4442 }
4443
4444 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4445 // the same alignment on MIPS64.
4446 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4447
4448 if (emit_debug_code()) {
4449 And(at, result, Operand(kDoubleAlignmentMask));
4450 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4451 }
4452
4453 // Calculate new top and bail out if new space is exhausted. Use result
4454 // to calculate the new top. Object size may be in words so a shift is
4455 // required to get the number of bytes.
4456 if ((flags & SIZE_IN_WORDS) != 0) {
4457 Dlsa(result_end, result, object_size, kPointerSizeLog2);
4458 } else {
4459 Daddu(result_end, result, Operand(object_size));
4460 }
4461
4462 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4463
4464 // Update allocation top. result temporarily holds the new top.
4465 if (emit_debug_code()) {
4466 And(at, result_end, Operand(kObjectAlignmentMask));
4467 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4468 }
4469
4470 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4471 // The top pointer is not updated for allocation folding dominators.
4472 sd(result_end, MemOperand(top_address));
4473 }
4474
4475 // Tag object.
4476 Daddu(result, result, Operand(kHeapObjectTag));
4477 }
4478
4479 void MacroAssembler::FastAllocate(int object_size, Register result,
4480 Register scratch1, Register scratch2,
4481 AllocationFlags flags) {
4482 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4483 DCHECK(!AreAliased(result, scratch1, scratch2, at));
4484
4485 // Make object size into bytes.
4486 if ((flags & SIZE_IN_WORDS) != 0) {
4487 object_size *= kPointerSize;
4488 }
4489 DCHECK(0 == (object_size & kObjectAlignmentMask));
4490
4491 ExternalReference allocation_top =
4492 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4493
4494 Register top_address = scratch1;
4495 Register result_end = scratch2;
4496 li(top_address, Operand(allocation_top));
4497 ld(result, MemOperand(top_address));
4498
4499 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4500 // the same alignment on MIPS64.
4501 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4502
4503 if (emit_debug_code()) {
4504 And(at, result, Operand(kDoubleAlignmentMask));
4505 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4506 }
4507
4508 // Calculate new top and write it back.
4509 Daddu(result_end, result, Operand(object_size));
4510 sd(result_end, MemOperand(top_address));
4511
4512 Daddu(result, result, Operand(kHeapObjectTag));
4513 }
4514
4515 void MacroAssembler::FastAllocate(Register object_size, Register result,
4516 Register result_end, Register scratch,
4517 AllocationFlags flags) {
4518 // |object_size| and |result_end| may overlap, other registers must not.
4519 DCHECK(!AreAliased(object_size, result, scratch, at));
4520 DCHECK(!AreAliased(result_end, result, scratch, at));
4521
4522 ExternalReference allocation_top =
4523 AllocationUtils::GetAllocationTopReference(isolate(), flags);
4524
4525 // Set up allocation top address and object size registers.
4526 Register top_address = scratch;
4527 li(top_address, Operand(allocation_top));
4528 ld(result, MemOperand(top_address));
4529
4530 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4531 // the same alignment on MIPS64.
4532 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4533
4534 if (emit_debug_code()) {
4535 And(at, result, Operand(kDoubleAlignmentMask));
4536 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4537 }
4538
4539 // Calculate new top and write it back
4540 if ((flags & SIZE_IN_WORDS) != 0) {
4541 Dlsa(result_end, result, object_size, kPointerSizeLog2);
4542 } else {
4543 Daddu(result_end, result, Operand(object_size));
4544 }
4545
4546 // Update allocation top. result temporarily holds the new top.
4547 if (emit_debug_code()) {
4548 And(at, result_end, Operand(kObjectAlignmentMask));
4549 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4550 }
4551
4552 Daddu(result, result, Operand(kHeapObjectTag));
4553 }
4554
4555 void MacroAssembler::AllocateTwoByteString(Register result,
4556 Register length,
4557 Register scratch1,
4558 Register scratch2,
4559 Register scratch3,
4560 Label* gc_required) {
4561 // Calculate the number of bytes needed for the characters in the string while
4562 // observing object alignment.
4563 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4564 dsll(scratch1, length, 1); // Length in bytes, not chars.
4565 daddiu(scratch1, scratch1,
4566 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
4567 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4568
4569 // Allocate two-byte string in new space.
4570 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4571 NO_ALLOCATION_FLAGS);
4572
4573 // Set the map, length and hash field.
4574 InitializeNewString(result,
4575 length,
4576 Heap::kStringMapRootIndex,
4577 scratch1,
4578 scratch2);
4579 }
4580
4581
4582 void MacroAssembler::AllocateOneByteString(Register result, Register length,
4583 Register scratch1, Register scratch2,
4584 Register scratch3,
4585 Label* gc_required) {
4586 // Calculate the number of bytes needed for the characters in the string
4587 // while observing object alignment.
4588 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4589 DCHECK(kCharSize == 1);
4590 daddiu(scratch1, length,
4591 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
4592 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4593
4594 // Allocate one-byte string in new space.
4595 Allocate(scratch1, result, scratch2, scratch3, gc_required,
4596 NO_ALLOCATION_FLAGS);
4597
4598 // Set the map, length and hash field.
4599 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
4600 scratch1, scratch2);
4601 }
4602
4603
4604 void MacroAssembler::AllocateTwoByteConsString(Register result,
4605 Register length,
4606 Register scratch1,
4607 Register scratch2,
4608 Label* gc_required) {
4609 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4610 NO_ALLOCATION_FLAGS);
4611 InitializeNewString(result,
4612 length,
4613 Heap::kConsStringMapRootIndex,
4614 scratch1,
4615 scratch2);
4616 }
4617
4618
4619 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
4620 Register scratch1,
4621 Register scratch2,
4622 Label* gc_required) {
4623 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4624 NO_ALLOCATION_FLAGS);
4625
4626 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
4627 scratch1, scratch2);
4628 }
4629
4630
4631 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4632 Register length,
4633 Register scratch1,
4634 Register scratch2,
4635 Label* gc_required) {
4636 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4637 NO_ALLOCATION_FLAGS);
4638
4639 InitializeNewString(result,
4640 length,
4641 Heap::kSlicedStringMapRootIndex,
4642 scratch1,
4643 scratch2);
4644 }
4645
4646
4647 void MacroAssembler::AllocateOneByteSlicedString(Register result,
4648 Register length,
4649 Register scratch1,
4650 Register scratch2,
4651 Label* gc_required) {
4652 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4653 NO_ALLOCATION_FLAGS);
4654
4655 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
4656 scratch1, scratch2);
4657 }
4658
4659
4660 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4661 Label* not_unique_name) {
4662 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
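  // reg holds an instance type; unique names are internalized strings and symbols.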
4663 Label succeed;
4664 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4665 Branch(&succeed, eq, at, Operand(zero_reg));
4666 Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4667
4668 bind(&succeed);
4669 }
4670
4671
4672 // Allocates a heap number or jumps to the label if the young space is full and
4673 // a scavenge is needed.
4674 void MacroAssembler::AllocateHeapNumber(Register result,
4675 Register scratch1,
4676 Register scratch2,
4677 Register heap_number_map,
4678 Label* need_gc,
4679 MutableMode mode) {
4680 // Allocate an object in the heap for the heap number and tag it as a heap
4681 // object.
4682 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4683 NO_ALLOCATION_FLAGS);
4684
4685 Heap::RootListIndex map_index = mode == MUTABLE
4686 ? Heap::kMutableHeapNumberMapRootIndex
4687 : Heap::kHeapNumberMapRootIndex;
4688 AssertIsRoot(heap_number_map, map_index);
4689
4690 // Store heap number map in the allocated object.
4691 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4692 }
4693
4694
4695 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4696 FPURegister value,
4697 Register scratch1,
4698 Register scratch2,
4699 Label* gc_required) {
4700 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4701 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4702 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4703 }
4704
4705
4706 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4707 Register value, Register scratch1,
4708 Register scratch2, Label* gc_required) {
4709 DCHECK(!result.is(constructor));
4710 DCHECK(!result.is(scratch1));
4711 DCHECK(!result.is(scratch2));
4712 DCHECK(!result.is(value));
4713
4714 // Allocate JSValue in new space.
4715 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4716 NO_ALLOCATION_FLAGS);
4717
4718 // Initialize the JSValue.
4719 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4720 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4721 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4722 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4723 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4724 sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4725 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4726 }
4727
4728
4729 void MacroAssembler::CopyBytes(Register src,
4730 Register dst,
4731 Register length,
4732 Register scratch) {
4733 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
4734
4735 // Align src before copying in word size chunks.
4736 Branch(&byte_loop, le, length, Operand(kPointerSize));
4737 bind(&align_loop_1);
4738 And(scratch, src, kPointerSize - 1);
4739 Branch(&word_loop, eq, scratch, Operand(zero_reg));
4740 lbu(scratch, MemOperand(src));
4741 Daddu(src, src, 1);
4742 sb(scratch, MemOperand(dst));
4743 Daddu(dst, dst, 1);
4744 Dsubu(length, length, Operand(1));
4745 Branch(&align_loop_1, ne, length, Operand(zero_reg));
4746
4747 // Copy bytes in word size chunks.
4748 bind(&word_loop);
4749 if (emit_debug_code()) {
4750 And(scratch, src, kPointerSize - 1);
4751 Assert(eq, kExpectingAlignmentForCopyBytes,
4752 scratch, Operand(zero_reg));
4753 }
4754 Branch(&byte_loop, lt, length, Operand(kPointerSize));
4755 ld(scratch, MemOperand(src));
4756 Daddu(src, src, kPointerSize);
4757
4758 // TODO(kalmard) check if this can be optimized to use sw in most cases.
4759 // Can't use unaligned access - copy byte by byte.
4760 if (kArchEndian == kLittle) {
4761 sb(scratch, MemOperand(dst, 0));
4762 dsrl(scratch, scratch, 8);
4763 sb(scratch, MemOperand(dst, 1));
4764 dsrl(scratch, scratch, 8);
4765 sb(scratch, MemOperand(dst, 2));
4766 dsrl(scratch, scratch, 8);
4767 sb(scratch, MemOperand(dst, 3));
4768 dsrl(scratch, scratch, 8);
4769 sb(scratch, MemOperand(dst, 4));
4770 dsrl(scratch, scratch, 8);
4771 sb(scratch, MemOperand(dst, 5));
4772 dsrl(scratch, scratch, 8);
4773 sb(scratch, MemOperand(dst, 6));
4774 dsrl(scratch, scratch, 8);
4775 sb(scratch, MemOperand(dst, 7));
4776 } else {
4777 sb(scratch, MemOperand(dst, 7));
4778 dsrl(scratch, scratch, 8);
4779 sb(scratch, MemOperand(dst, 6));
4780 dsrl(scratch, scratch, 8);
4781 sb(scratch, MemOperand(dst, 5));
4782 dsrl(scratch, scratch, 8);
4783 sb(scratch, MemOperand(dst, 4));
4784 dsrl(scratch, scratch, 8);
4785 sb(scratch, MemOperand(dst, 3));
4786 dsrl(scratch, scratch, 8);
4787 sb(scratch, MemOperand(dst, 2));
4788 dsrl(scratch, scratch, 8);
4789 sb(scratch, MemOperand(dst, 1));
4790 dsrl(scratch, scratch, 8);
4791 sb(scratch, MemOperand(dst, 0));
4792 }
4793 Daddu(dst, dst, 8);
4794
4795 Dsubu(length, length, Operand(kPointerSize));
4796 Branch(&word_loop);
4797
4798 // Copy the last bytes if any left.
4799 bind(&byte_loop);
4800 Branch(&done, eq, length, Operand(zero_reg));
4801 bind(&byte_loop_1);
4802 lbu(scratch, MemOperand(src));
4803 Daddu(src, src, 1);
4804 sb(scratch, MemOperand(dst));
4805 Daddu(dst, dst, 1);
4806 Dsubu(length, length, Operand(1));
4807 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
4808 bind(&done);
4809 }
4810
4811
4812 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4813 Register end_address,
4814 Register filler) {
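  // Store filler into every pointer-sized slot in [current_address, end_address).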
4815 Label loop, entry;
4816 Branch(&entry);
4817 bind(&loop);
4818 sd(filler, MemOperand(current_address));
4819 Daddu(current_address, current_address, kPointerSize);
4820 bind(&entry);
4821 Branch(&loop, ult, current_address, Operand(end_address));
4822 }
4823
4824
4825 void MacroAssembler::CheckFastElements(Register map,
4826 Register scratch,
4827 Label* fail) {
4828 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4829 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4830 STATIC_ASSERT(FAST_ELEMENTS == 2);
4831 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4832 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4833 Branch(fail, hi, scratch,
4834 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4835 }
4836
4837
4838 void MacroAssembler::CheckFastObjectElements(Register map,
4839 Register scratch,
4840 Label* fail) {
4841 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4842 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4843 STATIC_ASSERT(FAST_ELEMENTS == 2);
4844 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
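  // Fail for smi-only element kinds (ls) as well as anything slower than
  // FAST_HOLEY_ELEMENTS (hi).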
4845 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4846 Branch(fail, ls, scratch,
4847 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4848 Branch(fail, hi, scratch,
4849 Operand(Map::kMaximumBitField2FastHoleyElementValue));
4850 }
4851
4852
4853 void MacroAssembler::CheckFastSmiElements(Register map,
4854 Register scratch,
4855 Label* fail) {
4856 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4857 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4858 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
4859 Branch(fail, hi, scratch,
4860 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
4861 }
4862
4863
4864 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
4865 Register key_reg,
4866 Register elements_reg,
4867 Register scratch1,
4868 Register scratch2,
4869 Label* fail,
4870 int elements_offset) {
4871 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
4872 Label smi_value, done;
4873
4874 // Handle smi values specially.
4875 JumpIfSmi(value_reg, &smi_value);
4876
4877 // Ensure that the object is a heap number.
4878 CheckMap(value_reg,
4879 scratch1,
4880 Heap::kHeapNumberMapRootIndex,
4881 fail,
4882 DONT_DO_SMI_CHECK);
4883
4884 // Double value, turn a potential sNaN into a qNaN.
4885 DoubleRegister double_result = f0;
4886 DoubleRegister double_scratch = f2;
4887
4888 ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
4889 Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
4890 FPUCanonicalizeNaN(double_result, double_result);
4891
4892 bind(&smi_value);
4893 // Untag and transfer.
4894 dsrl32(scratch1, value_reg, 0);
4895 mtc1(scratch1, double_scratch);
4896 cvt_d_w(double_result, double_scratch);
4897
4898 bind(&done);
4899 Daddu(scratch1, elements_reg,
4900 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
4901 elements_offset));
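  // key_reg is a Smi with its value in the upper 32 bits; one arithmetic shift
  // both untags it and scales it to a double element offset.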
4902 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
4903 Daddu(scratch1, scratch1, scratch2);
4904 // scratch1 is now effective address of the double element.
4905 sdc1(double_result, MemOperand(scratch1, 0));
4906 }
4907
4908 void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
4909 FPURegister fs,
4910 FPURegister ft) {
4911 FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4912 Label check_nan, save_payload, done;
4913 Register scratch1 = t8;
4914 Register scratch2 = t9;
4915
4916 sub_s(dest, fs, ft);
4917 // Check if the result of subtraction is NaN.
4918 BranchF32(nullptr, &check_nan, eq, fs, ft);
4919 Branch(USE_DELAY_SLOT, &done);
4920 dest.is(fd) ? nop() : mov_s(fd, dest);
4921
4922 bind(&check_nan);
4923 // Check if first operand is a NaN.
4924 mfc1(scratch1, fs);
4925 BranchF32(nullptr, &save_payload, eq, fs, fs);
4926 // Second operand must be a NaN.
4927 mfc1(scratch1, ft);
4928
4929 bind(&save_payload);
4930 // Preserve the NaN payload and sign.
4931 And(scratch1, scratch1,
4932 Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
4933 mfc1(scratch2, dest);
4934 And(scratch2, scratch2, Operand(kSingleNaNMask));
4935 Or(scratch2, scratch2, scratch1);
4936 mtc1(scratch2, fd);
4937
4938 bind(&done);
4939 }
4940
4941 void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
4942 FPURegister fs,
4943 FPURegister ft) {
4944 FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4945 Label check_nan, save_payload, done;
4946 Register scratch1 = t8;
4947 Register scratch2 = t9;
4948
4949 sub_d(dest, fs, ft);
4950 // Check if the result of subtraction is NaN.
4951 BranchF64(nullptr, &check_nan, eq, fs, ft);
4952 Branch(USE_DELAY_SLOT, &done);
4953 dest.is(fd) ? nop() : mov_d(fd, dest);
4954
4955 bind(&check_nan);
4956 // Check if first operand is a NaN.
4957 dmfc1(scratch1, fs);
4958 BranchF64(nullptr, &save_payload, eq, fs, fs);
4959 // Second operand must be a NaN.
4960 dmfc1(scratch1, ft);
4961
4962 bind(&save_payload);
4963 // Preserve the NaN payload and sign.
4964 li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
4965 Dsubu(at, at, Operand(1));
4966 And(scratch1, scratch1, at);
4967 dmfc1(scratch2, dest);
4968 And(scratch2, scratch2, Operand(kDoubleNaNMask));
4969 Or(scratch2, scratch2, scratch1);
4970 dmtc1(scratch2, fd);
4971
4972 bind(&done);
4973 }
4974
4975 void MacroAssembler::CompareMapAndBranch(Register obj,
4976 Register scratch,
4977 Handle<Map> map,
4978 Label* early_success,
4979 Condition cond,
4980 Label* branch_to) {
4981 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4982 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4983 }
4984
4985
4986 void MacroAssembler::CompareMapAndBranch(Register obj_map,
4987 Handle<Map> map,
4988 Label* early_success,
4989 Condition cond,
4990 Label* branch_to) {
4991 Branch(branch_to, cond, obj_map, Operand(map));
4992 }
4993
4994
4995 void MacroAssembler::CheckMap(Register obj,
4996 Register scratch,
4997 Handle<Map> map,
4998 Label* fail,
4999 SmiCheckType smi_check_type) {
5000 if (smi_check_type == DO_SMI_CHECK) {
5001 JumpIfSmi(obj, fail);
5002 }
5003 Label success;
5004 CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
5005 bind(&success);
5006 }
5007
5008
5009 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
5010 Register scratch2, Handle<WeakCell> cell,
5011 Handle<Code> success,
5012 SmiCheckType smi_check_type) {
5013 Label fail;
5014 if (smi_check_type == DO_SMI_CHECK) {
5015 JumpIfSmi(obj, &fail);
5016 }
5017 ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
5018 GetWeakValue(scratch2, cell);
5019 Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
5020 bind(&fail);
5021 }
5022
5023
5024 void MacroAssembler::CheckMap(Register obj,
5025 Register scratch,
5026 Heap::RootListIndex index,
5027 Label* fail,
5028 SmiCheckType smi_check_type) {
5029 if (smi_check_type == DO_SMI_CHECK) {
5030 JumpIfSmi(obj, fail);
5031 }
5032 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
5033 LoadRoot(at, index);
5034 Branch(fail, ne, scratch, Operand(at));
5035 }
5036
5037
5038 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
5039 li(value, Operand(cell));
5040 ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
5041 }
5042
5043 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
5044 const DoubleRegister src) {
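  // Subtracting 0.0 leaves ordinary values unchanged but quiets a signalling NaN.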
5045 sub_d(dst, src, kDoubleRegZero);
5046 }
5047
5048 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
5049 Label* miss) {
5050 GetWeakValue(value, cell);
5051 JumpIfSmi(value, miss);
5052 }
5053
5054
5055 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
5056 if (IsMipsSoftFloatABI) {
5057 if (kArchEndian == kLittle) {
5058 Move(dst, v0, v1);
5059 } else {
5060 Move(dst, v1, v0);
5061 }
5062 } else {
5063 Move(dst, f0); // Reg f0 is n64 ABI FP return value.
5064 }
5065 }
5066
5067
5068 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
5069 if (IsMipsSoftFloatABI) {
5070 if (kArchEndian == kLittle) {
5071 Move(dst, a0, a1);
5072 } else {
5073 Move(dst, a1, a0);
5074 }
5075 } else {
5076 Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
5077 }
5078 }
5079
5080
5081 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
5082 if (!IsMipsSoftFloatABI) {
5083 Move(f12, src);
5084 } else {
5085 if (kArchEndian == kLittle) {
5086 Move(a0, a1, src);
5087 } else {
5088 Move(a1, a0, src);
5089 }
5090 }
5091 }
5092
5093
5094 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
5095 if (!IsMipsSoftFloatABI) {
5096 Move(f0, src);
5097 } else {
5098 if (kArchEndian == kLittle) {
5099 Move(v0, v1, src);
5100 } else {
5101 Move(v1, v0, src);
5102 }
5103 }
5104 }
5105
5106
5107 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
5108 DoubleRegister src2) {
5109 if (!IsMipsSoftFloatABI) {
5110 const DoubleRegister fparg2 = f13;
5111 if (src2.is(f12)) {
5112 DCHECK(!src1.is(fparg2));
5113 Move(fparg2, src2);
5114 Move(f12, src1);
5115 } else {
5116 Move(f12, src1);
5117 Move(fparg2, src2);
5118 }
5119 } else {
5120 if (kArchEndian == kLittle) {
5121 Move(a0, a1, src1);
5122 Move(a2, a3, src2);
5123 } else {
5124 Move(a1, a0, src1);
5125 Move(a3, a2, src2);
5126 }
5127 }
5128 }
5129
5130
5131 // -----------------------------------------------------------------------------
5132 // JavaScript invokes.
5133
5134 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
5135 Register caller_args_count_reg,
5136 Register scratch0, Register scratch1) {
5137 #if DEBUG
5138 if (callee_args_count.is_reg()) {
5139 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
5140 scratch1));
5141 } else {
5142 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
5143 }
5144 #endif
5145
5146 // Calculate the end of destination area where we will put the arguments
5147 // after we drop current frame. We add kPointerSize to count the receiver
5148 // argument which is not included into formal parameters count.
5149 Register dst_reg = scratch0;
5150 Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
5151 Daddu(dst_reg, dst_reg,
5152 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
5153
5154 Register src_reg = caller_args_count_reg;
5155 // Calculate the end of source area. +kPointerSize is for the receiver.
5156 if (callee_args_count.is_reg()) {
5157 Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
5158 Daddu(src_reg, src_reg, Operand(kPointerSize));
5159 } else {
5160 Daddu(src_reg, sp,
5161 Operand((callee_args_count.immediate() + 1) * kPointerSize));
5162 }
5163
5164 if (FLAG_debug_code) {
5165 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
5166 }
5167
5168 // Restore caller's frame pointer and return address now as they will be
5169 // overwritten by the copying loop.
5170 ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
5171 ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5172
5173 // Now copy callee arguments to the caller frame going backwards to avoid
5174 // callee arguments corruption (source and destination areas could overlap).
5175
5176 // Both src_reg and dst_reg are pointing to the word after the one to copy,
5177 // so they must be pre-decremented in the loop.
5178 Register tmp_reg = scratch1;
5179 Label loop, entry;
5180 Branch(&entry);
5181 bind(&loop);
5182 Dsubu(src_reg, src_reg, Operand(kPointerSize));
5183 Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
5184 ld(tmp_reg, MemOperand(src_reg));
5185 sd(tmp_reg, MemOperand(dst_reg));
5186 bind(&entry);
5187 Branch(&loop, ne, sp, Operand(src_reg));
5188
5189 // Leave current frame.
5190 mov(sp, dst_reg);
5191 }
5192
5193 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
5194 const ParameterCount& actual,
5195 Label* done,
5196 bool* definitely_mismatches,
5197 InvokeFlag flag,
5198 const CallWrapper& call_wrapper) {
5199 bool definitely_matches = false;
5200 *definitely_mismatches = false;
5201 Label regular_invoke;
5202
5203 // Check whether the expected and actual arguments count match. If not,
5204 // setup registers according to contract with ArgumentsAdaptorTrampoline:
5205 // a0: actual arguments count
5206 // a1: function (passed through to callee)
5207 // a2: expected arguments count
5208
5209 // The code below is made a lot easier because the calling code already sets
5210 // up actual and expected registers according to the contract if values are
5211 // passed in registers.
5212 DCHECK(actual.is_immediate() || actual.reg().is(a0));
5213 DCHECK(expected.is_immediate() || expected.reg().is(a2));
5214
5215 if (expected.is_immediate()) {
5216 DCHECK(actual.is_immediate());
5217 li(a0, Operand(actual.immediate()));
5218 if (expected.immediate() == actual.immediate()) {
5219 definitely_matches = true;
5220 } else {
5221 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
5222 if (expected.immediate() == sentinel) {
5223 // Don't worry about adapting arguments for builtins that
5224 // don't want that done. Skip adaption code by making it look
5225 // like we have a match between expected and actual number of
5226 // arguments.
5227 definitely_matches = true;
5228 } else {
5229 *definitely_mismatches = true;
5230 li(a2, Operand(expected.immediate()));
5231 }
5232 }
5233 } else if (actual.is_immediate()) {
5234 li(a0, Operand(actual.immediate()));
5235 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
5236 } else {
5237 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
5238 }
5239
5240 if (!definitely_matches) {
5241 Handle<Code> adaptor =
5242 isolate()->builtins()->ArgumentsAdaptorTrampoline();
5243 if (flag == CALL_FUNCTION) {
5244 call_wrapper.BeforeCall(CallSize(adaptor));
5245 Call(adaptor);
5246 call_wrapper.AfterCall();
5247 if (!*definitely_mismatches) {
5248 Branch(done);
5249 }
5250 } else {
5251 Jump(adaptor, RelocInfo::CODE_TARGET);
5252 }
5253 bind(&regular_invoke);
5254 }
5255 }
5256
5257
5258 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
5259 const ParameterCount& expected,
5260 const ParameterCount& actual) {
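  // When the debugger is stepping, ask the runtime to flood the function with
  // one-shot breakpoints, preserving the argument registers across the call.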
5261 Label skip_flooding;
5262 ExternalReference last_step_action =
5263 ExternalReference::debug_last_step_action_address(isolate());
5264 STATIC_ASSERT(StepFrame > StepIn);
5265 li(t0, Operand(last_step_action));
5266 lb(t0, MemOperand(t0));
5267 Branch(&skip_flooding, lt, t0, Operand(StepIn));
5268 {
5269 FrameScope frame(this,
5270 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
5271 if (expected.is_reg()) {
5272 SmiTag(expected.reg());
5273 Push(expected.reg());
5274 }
5275 if (actual.is_reg()) {
5276 SmiTag(actual.reg());
5277 Push(actual.reg());
5278 }
5279 if (new_target.is_valid()) {
5280 Push(new_target);
5281 }
5282 Push(fun);
5283 Push(fun);
5284 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
5285 Pop(fun);
5286 if (new_target.is_valid()) {
5287 Pop(new_target);
5288 }
5289 if (actual.is_reg()) {
5290 Pop(actual.reg());
5291 SmiUntag(actual.reg());
5292 }
5293 if (expected.is_reg()) {
5294 Pop(expected.reg());
5295 SmiUntag(expected.reg());
5296 }
5297 }
5298 bind(&skip_flooding);
5299 }
5300
5301
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5302 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
5303 const ParameterCount& expected,
5304 const ParameterCount& actual,
5305 InvokeFlag flag,
5306 const CallWrapper& call_wrapper) {
5307 // You can't call a function without a valid frame.
5308 DCHECK(flag == JUMP_FUNCTION || has_frame());
5309 DCHECK(function.is(a1));
5310 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
5311
5312 if (call_wrapper.NeedsDebugStepCheck()) {
5313 FloodFunctionIfStepping(function, new_target, expected, actual);
5314 }
5315
5316 // Clear the new.target register if not given.
5317 if (!new_target.is_valid()) {
5318 LoadRoot(a3, Heap::kUndefinedValueRootIndex);
5319 }
5320
5321 Label done;
5322 bool definitely_mismatches = false;
5323 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
5324 call_wrapper);
5325 if (!definitely_mismatches) {
5326 // We call indirectly through the code field in the function to
5327 // allow recompilation to take effect without changing any of the
5328 // call sites.
5329 Register code = t0;
5330 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5331 if (flag == CALL_FUNCTION) {
5332 call_wrapper.BeforeCall(CallSize(code));
5333 Call(code);
5334 call_wrapper.AfterCall();
5335 } else {
5336 DCHECK(flag == JUMP_FUNCTION);
5337 Jump(code);
5338 }
5339 // Continue here if InvokePrologue handled the invocation itself (via the
5340 // arguments adaptor) because of mismatched parameter counts.
5341 bind(&done);
5342 }
5343 }
5344
5345
InvokeFunction(Register function,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5346 void MacroAssembler::InvokeFunction(Register function,
5347 Register new_target,
5348 const ParameterCount& actual,
5349 InvokeFlag flag,
5350 const CallWrapper& call_wrapper) {
5351 // You can't call a function without a valid frame.
5352 DCHECK(flag == JUMP_FUNCTION || has_frame());
5353
5354 // Contract with called JS functions requires that function is passed in a1.
5355 DCHECK(function.is(a1));
5356 Register expected_reg = a2;
5357 Register temp_reg = t0;
5358 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5359 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5360 // The argument count is stored as int32_t on 64-bit platforms.
5361 // TODO(plind): Smi on 32-bit platforms.
5362 lw(expected_reg,
5363 FieldMemOperand(temp_reg,
5364 SharedFunctionInfo::kFormalParameterCountOffset));
5365 ParameterCount expected(expected_reg);
5366 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
5367 }
5368
5369
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5370 void MacroAssembler::InvokeFunction(Register function,
5371 const ParameterCount& expected,
5372 const ParameterCount& actual,
5373 InvokeFlag flag,
5374 const CallWrapper& call_wrapper) {
5375 // You can't call a function without a valid frame.
5376 DCHECK(flag == JUMP_FUNCTION || has_frame());
5377
5378 // Contract with called JS functions requires that function is passed in a1.
5379 DCHECK(function.is(a1));
5380
5381 // Get the function and setup the context.
5382 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
5383
5384 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
5385 }
5386
5387
InvokeFunction(Handle<JSFunction> function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)5388 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
5389 const ParameterCount& expected,
5390 const ParameterCount& actual,
5391 InvokeFlag flag,
5392 const CallWrapper& call_wrapper) {
5393 li(a1, function);
5394 InvokeFunction(a1, expected, actual, flag, call_wrapper);
5395 }
5396
5397
IsObjectJSStringType(Register object,Register scratch,Label * fail)5398 void MacroAssembler::IsObjectJSStringType(Register object,
5399 Register scratch,
5400 Label* fail) {
5401 DCHECK(kNotStringTag != 0);
5402
5403 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5404 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5405 And(scratch, scratch, Operand(kIsNotStringMask));
5406 Branch(fail, ne, scratch, Operand(zero_reg));
5407 }
5408
5409
IsObjectNameType(Register object,Register scratch,Label * fail)5410 void MacroAssembler::IsObjectNameType(Register object,
5411 Register scratch,
5412 Label* fail) {
5413 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
5414 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5415 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
5416 }
5417
5418
5419 // ---------------------------------------------------------------------------
5420 // Support functions.
5421
5422
GetMapConstructor(Register result,Register map,Register temp,Register temp2)5423 void MacroAssembler::GetMapConstructor(Register result, Register map,
5424 Register temp, Register temp2) {
5425 Label done, loop;
5426 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
5427 bind(&loop);
5428 JumpIfSmi(result, &done);
5429 GetObjectType(result, temp, temp2);
5430 Branch(&done, ne, temp2, Operand(MAP_TYPE));
5431 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
5432 Branch(&loop);
5433 bind(&done);
5434 }
5435
5436
TryGetFunctionPrototype(Register function,Register result,Register scratch,Label * miss)5437 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
5438 Register scratch, Label* miss) {
5439 // Get the prototype or initial map from the function.
5440 ld(result,
5441 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5442
5443 // If the prototype or initial map is the hole, don't return it and
5444 // simply miss the cache instead. This will allow us to allocate a
5445 // prototype object on-demand in the runtime system.
5446 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
5447 Branch(miss, eq, result, Operand(t8));
5448
5449 // If the function does not have an initial map, we're done.
5450 Label done;
5451 GetObjectType(result, scratch, scratch);
5452 Branch(&done, ne, scratch, Operand(MAP_TYPE));
5453
5454 // Get the prototype from the initial map.
5455 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
5456
5457 // All done.
5458 bind(&done);
5459 }
5460
5461
GetObjectType(Register object,Register map,Register type_reg)5462 void MacroAssembler::GetObjectType(Register object,
5463 Register map,
5464 Register type_reg) {
5465 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
5466 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
5467 }
5468
5469
5470 // -----------------------------------------------------------------------------
5471 // Runtime calls.
5472
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)5473 void MacroAssembler::CallStub(CodeStub* stub,
5474 TypeFeedbackId ast_id,
5475 Condition cond,
5476 Register r1,
5477 const Operand& r2,
5478 BranchDelaySlot bd) {
5479 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
5480 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
5481 cond, r1, r2, bd);
5482 }
5483
5484
TailCallStub(CodeStub * stub,Condition cond,Register r1,const Operand & r2,BranchDelaySlot bd)5485 void MacroAssembler::TailCallStub(CodeStub* stub,
5486 Condition cond,
5487 Register r1,
5488 const Operand& r2,
5489 BranchDelaySlot bd) {
5490 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
5491 }
5492
5493
AllowThisStubCall(CodeStub * stub)5494 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
5495 return has_frame_ || !stub->SometimesSetsUpAFrame();
5496 }
5497
5498
IndexFromHash(Register hash,Register index)5499 void MacroAssembler::IndexFromHash(Register hash, Register index) {
5500 // If the hash field contains an array index, pick it out. The assert checks
5501 // that the constant for the maximum number of digits of an array index
5502 // cached in the hash field and the number of bits reserved for it do not
5503 // conflict.
5504 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
5505 (1 << String::kArrayIndexValueBits));
5506 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
5507 }
5508
5509
ObjectToDoubleFPURegister(Register object,FPURegister result,Register scratch1,Register scratch2,Register heap_number_map,Label * not_number,ObjectToDoubleFlags flags)5510 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
5511 FPURegister result,
5512 Register scratch1,
5513 Register scratch2,
5514 Register heap_number_map,
5515 Label* not_number,
5516 ObjectToDoubleFlags flags) {
5517 Label done;
5518 if ((flags & OBJECT_NOT_SMI) == 0) {
5519 Label not_smi;
5520 JumpIfNotSmi(object, &not_smi);
5521 // Remove smi tag and convert to double.
5522 // dsra(scratch1, object, kSmiTagSize);
5523 dsra32(scratch1, object, 0);
5524 mtc1(scratch1, result);
5525 cvt_d_w(result, result);
5526 Branch(&done);
5527 bind(&not_smi);
5528 }
5529 // Check for heap number and load double value from it.
5530 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
5531 Branch(not_number, ne, scratch1, Operand(heap_number_map));
5532
5533 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
5534 // If exponent is all ones the number is either a NaN or +/-Infinity.
5535 Register exponent = scratch1;
5536 Register mask_reg = scratch2;
5537 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
5538 li(mask_reg, HeapNumber::kExponentMask);
5539
5540 And(exponent, exponent, mask_reg);
5541 Branch(not_number, eq, exponent, Operand(mask_reg));
5542 }
5543 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5544 bind(&done);
5545 }
5546
5547
SmiToDoubleFPURegister(Register smi,FPURegister value,Register scratch1)5548 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5549 FPURegister value,
5550 Register scratch1) {
5551 dsra32(scratch1, smi, 0);
5552 mtc1(scratch1, value);
5553 cvt_d_w(value, value);
5554 }
5555
BranchOvfHelper(MacroAssembler * masm,Register overflow_dst,Label * overflow_label,Label * no_overflow_label)5556 static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5557 Label* overflow_label,
5558 Label* no_overflow_label) {
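  // The callers compute overflow_dst so that its sign bit is set exactly when
  // the operation overflowed, so a signed comparison against zero selects the
  // correct branch here.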
5559 DCHECK(overflow_label || no_overflow_label);
5560 if (!overflow_label) {
5561 DCHECK(no_overflow_label);
5562 masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5563 } else {
5564 masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5565 if (no_overflow_label) masm->Branch(no_overflow_label);
5566 }
5567 }
5568
AddBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5569 void MacroAssembler::AddBranchOvf(Register dst, Register left,
5570 const Operand& right, Label* overflow_label,
5571 Label* no_overflow_label, Register scratch) {
5572 if (right.is_reg()) {
5573 AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5574 scratch);
5575 } else {
5576 if (kArchVariant == kMips64r6) {
5577 Register right_reg = t9;
5578 DCHECK(!left.is(right_reg));
5579 li(right_reg, Operand(right));
5580 AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5581 } else {
5582 Register overflow_dst = t9;
5583 DCHECK(!dst.is(scratch));
5584 DCHECK(!dst.is(overflow_dst));
5585 DCHECK(!scratch.is(overflow_dst));
5586 DCHECK(!left.is(overflow_dst));
5587 if (dst.is(left)) {
5588 mov(scratch, left); // Preserve left.
5589 // Left is overwritten.
5590 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5591 xor_(scratch, dst, scratch); // Original left.
5592 // Load right since xori takes uint16 as immediate.
5593 Addu(overflow_dst, zero_reg, right);
5594 xor_(overflow_dst, dst, overflow_dst);
5595 and_(overflow_dst, overflow_dst, scratch);
5596 } else {
5597 Addu(dst, left, static_cast<int32_t>(right.immediate()));
5598 xor_(overflow_dst, dst, left);
5599 // Load right since xori takes uint16 as immediate.
5600 Addu(scratch, zero_reg, right);
5601 xor_(scratch, dst, scratch);
5602 and_(overflow_dst, scratch, overflow_dst);
5603 }
5604 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5605 }
5606 }
5607 }
5608
AddBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5609 void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5610 Label* overflow_label,
5611 Label* no_overflow_label, Register scratch) {
5612 if (kArchVariant == kMips64r6) {
5613 if (!overflow_label) {
5614 DCHECK(no_overflow_label);
5615 DCHECK(!dst.is(scratch));
5616 Register left_reg = left.is(dst) ? scratch : left;
5617 Register right_reg = right.is(dst) ? t9 : right;
5618 DCHECK(!dst.is(left_reg));
5619 DCHECK(!dst.is(right_reg));
5620 Move(left_reg, left);
5621 Move(right_reg, right);
5622 addu(dst, left, right);
5623 bnvc(left_reg, right_reg, no_overflow_label);
5624 } else {
5625 bovc(left, right, overflow_label);
5626 addu(dst, left, right);
5627 if (no_overflow_label) bc(no_overflow_label);
5628 }
5629 } else {
5630 Register overflow_dst = t9;
5631 DCHECK(!dst.is(scratch));
5632 DCHECK(!dst.is(overflow_dst));
5633 DCHECK(!scratch.is(overflow_dst));
5634 DCHECK(!left.is(overflow_dst));
5635 DCHECK(!right.is(overflow_dst));
5636 DCHECK(!left.is(scratch));
5637 DCHECK(!right.is(scratch));
5638
5639 if (left.is(right) && dst.is(left)) {
5640 mov(overflow_dst, right);
5641 right = overflow_dst;
5642 }
5643
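    // Signed addition overflows exactly when both operands have the same sign
    // but the result's sign differs; (dst ^ left) & (dst ^ right) therefore
    // has its sign bit set iff the addition overflowed.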
5644 if (dst.is(left)) {
5645 mov(scratch, left); // Preserve left.
5646 addu(dst, left, right); // Left is overwritten.
5647 xor_(scratch, dst, scratch); // Original left.
5648 xor_(overflow_dst, dst, right);
5649 and_(overflow_dst, overflow_dst, scratch);
5650 } else if (dst.is(right)) {
5651 mov(scratch, right); // Preserve right.
5652 addu(dst, left, right); // Right is overwritten.
5653 xor_(scratch, dst, scratch); // Original right.
5654 xor_(overflow_dst, dst, left);
5655 and_(overflow_dst, overflow_dst, scratch);
5656 } else {
5657 addu(dst, left, right);
5658 xor_(overflow_dst, dst, left);
5659 xor_(scratch, dst, right);
5660 and_(overflow_dst, scratch, overflow_dst);
5661 }
5662 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5663 }
5664 }
5665
SubBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5666 void MacroAssembler::SubBranchOvf(Register dst, Register left,
5667 const Operand& right, Label* overflow_label,
5668 Label* no_overflow_label, Register scratch) {
5669 DCHECK(overflow_label || no_overflow_label);
5670 if (right.is_reg()) {
5671 SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5672 scratch);
5673 } else {
5674 Register overflow_dst = t9;
5675 DCHECK(!dst.is(scratch));
5676 DCHECK(!dst.is(overflow_dst));
5677 DCHECK(!scratch.is(overflow_dst));
5678 DCHECK(!left.is(overflow_dst));
5679 DCHECK(!left.is(scratch));
5680 if (dst.is(left)) {
5681 mov(scratch, left); // Preserve left.
5682 // Left is overwritten.
5683 Subu(dst, left, static_cast<int32_t>(right.immediate()));
5684 // Load right since xori takes uint16 as immediate.
5685 Addu(overflow_dst, zero_reg, right);
5686 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5687 xor_(scratch, dst, scratch); // scratch is original left.
5688 and_(overflow_dst, scratch, overflow_dst);
5689 } else {
5690 Subu(dst, left, right);
5691 xor_(overflow_dst, dst, left);
5692 // Load right since xori takes uint16 as immediate.
5693 Addu(scratch, zero_reg, right);
5694 xor_(scratch, left, scratch);
5695 and_(overflow_dst, scratch, overflow_dst);
5696 }
5697 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5698 }
5699 }
5700
SubBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5701 void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5702 Label* overflow_label,
5703 Label* no_overflow_label, Register scratch) {
5704 DCHECK(overflow_label || no_overflow_label);
5705 Register overflow_dst = t9;
5706 DCHECK(!dst.is(scratch));
5707 DCHECK(!dst.is(overflow_dst));
5708 DCHECK(!scratch.is(overflow_dst));
5709 DCHECK(!overflow_dst.is(left));
5710 DCHECK(!overflow_dst.is(right));
5711 DCHECK(!scratch.is(left));
5712 DCHECK(!scratch.is(right));
5713
5714 // This happens with some crankshaft code. Since Subu works fine if
5715 // left == right, let's not make that restriction here.
5716 if (left.is(right)) {
5717 mov(dst, zero_reg);
5718 if (no_overflow_label) {
5719 Branch(no_overflow_label);
5720 }
5721 }
5722
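    // Signed subtraction overflows exactly when the operands have different
    // signs and the result's sign differs from left's; (dst ^ left) &
    // (left ^ right) therefore has its sign bit set iff it overflowed.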
5723 if (dst.is(left)) {
5724 mov(scratch, left); // Preserve left.
5725 subu(dst, left, right); // Left is overwritten.
5726 xor_(overflow_dst, dst, scratch); // scratch is original left.
5727 xor_(scratch, scratch, right); // scratch is original left.
5728 and_(overflow_dst, scratch, overflow_dst);
5729 } else if (dst.is(right)) {
5730 mov(scratch, right); // Preserve right.
5731 subu(dst, left, right); // Right is overwritten.
5732 xor_(overflow_dst, dst, left);
5733 xor_(scratch, left, scratch); // Original right.
5734 and_(overflow_dst, scratch, overflow_dst);
5735 } else {
5736 subu(dst, left, right);
5737 xor_(overflow_dst, dst, left);
5738 xor_(scratch, left, right);
5739 and_(overflow_dst, scratch, overflow_dst);
5740 }
5741 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5742 }
5743
DaddBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5744 void MacroAssembler::DaddBranchOvf(Register dst, Register left,
5745 const Operand& right, Label* overflow_label,
5746 Label* no_overflow_label, Register scratch) {
5747 if (right.is_reg()) {
5748 DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5749 scratch);
5750 } else {
5751 Register overflow_dst = t9;
5752 DCHECK(!dst.is(scratch));
5753 DCHECK(!dst.is(overflow_dst));
5754 DCHECK(!scratch.is(overflow_dst));
5755 DCHECK(!left.is(overflow_dst));
5756 li(overflow_dst, right); // Load right.
5757 if (dst.is(left)) {
5758 mov(scratch, left); // Preserve left.
5759 Daddu(dst, left, overflow_dst); // Left is overwritten.
5760 xor_(scratch, dst, scratch); // Original left.
5761 xor_(overflow_dst, dst, overflow_dst);
5762 and_(overflow_dst, overflow_dst, scratch);
5763 } else {
5764 Daddu(dst, left, overflow_dst);
5765 xor_(scratch, dst, overflow_dst);
5766 xor_(overflow_dst, dst, left);
5767 and_(overflow_dst, scratch, overflow_dst);
5768 }
5769 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5770 }
5771 }
5772
5773
DaddBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5774 void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
5775 Label* overflow_label,
5776 Label* no_overflow_label, Register scratch) {
5777 Register overflow_dst = t9;
5778 DCHECK(!dst.is(scratch));
5779 DCHECK(!dst.is(overflow_dst));
5780 DCHECK(!scratch.is(overflow_dst));
5781 DCHECK(!left.is(overflow_dst));
5782 DCHECK(!right.is(overflow_dst));
5783 DCHECK(!left.is(scratch));
5784 DCHECK(!right.is(scratch));
5785
5786 if (left.is(right) && dst.is(left)) {
5787 mov(overflow_dst, right);
5788 right = overflow_dst;
5789 }
5790
5791 if (dst.is(left)) {
5792 mov(scratch, left); // Preserve left.
5793 daddu(dst, left, right); // Left is overwritten.
5794 xor_(scratch, dst, scratch); // Original left.
5795 xor_(overflow_dst, dst, right);
5796 and_(overflow_dst, overflow_dst, scratch);
5797 } else if (dst.is(right)) {
5798 mov(scratch, right); // Preserve right.
5799 daddu(dst, left, right); // Right is overwritten.
5800 xor_(scratch, dst, scratch); // Original right.
5801 xor_(overflow_dst, dst, left);
5802 and_(overflow_dst, overflow_dst, scratch);
5803 } else {
5804 daddu(dst, left, right);
5805 xor_(overflow_dst, dst, left);
5806 xor_(scratch, dst, right);
5807 and_(overflow_dst, scratch, overflow_dst);
5808 }
5809 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5810 }
5811
5812
DsubBranchOvf(Register dst,Register left,const Operand & right,Label * overflow_label,Label * no_overflow_label,Register scratch)5813 void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5814 const Operand& right, Label* overflow_label,
5815 Label* no_overflow_label, Register scratch) {
5816 DCHECK(overflow_label || no_overflow_label);
5817 if (right.is_reg()) {
5818 DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5819 scratch);
5820 } else {
5821 Register overflow_dst = t9;
5822 DCHECK(!dst.is(scratch));
5823 DCHECK(!dst.is(overflow_dst));
5824 DCHECK(!scratch.is(overflow_dst));
5825 DCHECK(!left.is(overflow_dst));
5826 DCHECK(!left.is(scratch));
5827 li(overflow_dst, right); // Load right.
5828 if (dst.is(left)) {
5829 mov(scratch, left); // Preserve left.
5830 Dsubu(dst, left, overflow_dst); // Left is overwritten.
5831 xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
5832 xor_(scratch, dst, scratch); // scratch is original left.
5833 and_(overflow_dst, scratch, overflow_dst);
5834 } else {
5835 Dsubu(dst, left, overflow_dst);
5836 xor_(scratch, left, overflow_dst);
5837 xor_(overflow_dst, dst, left);
5838 and_(overflow_dst, scratch, overflow_dst);
5839 }
5840 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5841 }
5842 }
5843
5844
DsubBranchOvf(Register dst,Register left,Register right,Label * overflow_label,Label * no_overflow_label,Register scratch)5845 void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5846 Label* overflow_label,
5847 Label* no_overflow_label, Register scratch) {
5848 DCHECK(overflow_label || no_overflow_label);
5849 Register overflow_dst = t9;
5850 DCHECK(!dst.is(scratch));
5851 DCHECK(!dst.is(overflow_dst));
5852 DCHECK(!scratch.is(overflow_dst));
5853 DCHECK(!overflow_dst.is(left));
5854 DCHECK(!overflow_dst.is(right));
5855 DCHECK(!scratch.is(left));
5856 DCHECK(!scratch.is(right));
5857
5858 // This happens with some crankshaft code. Since Dsubu works fine if
5859 // left == right, let's not make that restriction here.
5860 if (left.is(right)) {
5861 mov(dst, zero_reg);
5862 if (no_overflow_label) {
5863 Branch(no_overflow_label);
5864 }
5865 }
5866
5867 if (dst.is(left)) {
5868 mov(scratch, left); // Preserve left.
5869 dsubu(dst, left, right); // Left is overwritten.
5870 xor_(overflow_dst, dst, scratch); // scratch is original left.
5871 xor_(scratch, scratch, right); // scratch is original left.
5872 and_(overflow_dst, scratch, overflow_dst);
5873 } else if (dst.is(right)) {
5874 mov(scratch, right); // Preserve right.
5875 dsubu(dst, left, right); // Right is overwritten.
5876 xor_(overflow_dst, dst, left);
5877 xor_(scratch, left, scratch); // Original right.
5878 and_(overflow_dst, scratch, overflow_dst);
5879 } else {
5880 dsubu(dst, left, right);
5881 xor_(overflow_dst, dst, left);
5882 xor_(scratch, left, right);
5883 and_(overflow_dst, scratch, overflow_dst);
5884 }
5885 BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5886 }
5887
5888
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles,BranchDelaySlot bd)5889 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5890 SaveFPRegsMode save_doubles,
5891 BranchDelaySlot bd) {
5892 // All parameters are on the stack. v0 has the return value after call.
5893
5894 // If the expected number of arguments of the runtime function is
5895 // constant, we check that the actual number of arguments matches the
5896 // expectation.
5897 CHECK(f->nargs < 0 || f->nargs == num_arguments);
5898
5899 // TODO(1236192): Most runtime routines don't need the number of
5900 // arguments passed in because it is constant. At some point we
5901 // should remove this need and make the runtime routine entry code
5902 // smarter.
5903 PrepareCEntryArgs(num_arguments);
5904 PrepareCEntryFunction(ExternalReference(f, isolate()));
5905 CEntryStub stub(isolate(), 1, save_doubles);
5906 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5907 }
5908
5909
CallExternalReference(const ExternalReference & ext,int num_arguments,BranchDelaySlot bd)5910 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5911 int num_arguments,
5912 BranchDelaySlot bd) {
5913 PrepareCEntryArgs(num_arguments);
5914 PrepareCEntryFunction(ext);
5915
5916 CEntryStub stub(isolate(), 1);
5917 CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5918 }
5919
5920
TailCallRuntime(Runtime::FunctionId fid)5921 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5922 const Runtime::Function* function = Runtime::FunctionForId(fid);
5923 DCHECK_EQ(1, function->result_size);
5924 if (function->nargs >= 0) {
5925 PrepareCEntryArgs(function->nargs);
5926 }
5927 JumpToExternalReference(ExternalReference(fid, isolate()));
5928 }
5929
5930
JumpToExternalReference(const ExternalReference & builtin,BranchDelaySlot bd)5931 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5932 BranchDelaySlot bd) {
5933 PrepareCEntryFunction(builtin);
5934 CEntryStub stub(isolate(), 1);
5935 Jump(stub.GetCode(),
5936 RelocInfo::CODE_TARGET,
5937 al,
5938 zero_reg,
5939 Operand(zero_reg),
5940 bd);
5941 }
5942
5943
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5944 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5945 Register scratch1, Register scratch2) {
5946 if (FLAG_native_code_counters && counter->Enabled()) {
5947 li(scratch1, Operand(value));
5948 li(scratch2, Operand(ExternalReference(counter)));
5949 sd(scratch1, MemOperand(scratch2));
5950 }
5951 }
5952
5953
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5954 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5955 Register scratch1, Register scratch2) {
5956 DCHECK(value > 0);
5957 if (FLAG_native_code_counters && counter->Enabled()) {
5958 li(scratch2, Operand(ExternalReference(counter)));
5959 ld(scratch1, MemOperand(scratch2));
5960 Daddu(scratch1, scratch1, Operand(value));
5961 sd(scratch1, MemOperand(scratch2));
5962 }
5963 }
5964
5965
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)5966 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5967 Register scratch1, Register scratch2) {
5968 DCHECK(value > 0);
5969 if (FLAG_native_code_counters && counter->Enabled()) {
5970 li(scratch2, Operand(ExternalReference(counter)));
5971 ld(scratch1, MemOperand(scratch2));
5972 Dsubu(scratch1, scratch1, Operand(value));
5973 sd(scratch1, MemOperand(scratch2));
5974 }
5975 }
5976
5977
5978 // -----------------------------------------------------------------------------
5979 // Debugging.
5980
Assert(Condition cc,BailoutReason reason,Register rs,Operand rt)5981 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5982 Register rs, Operand rt) {
5983 if (emit_debug_code())
5984 Check(cc, reason, rs, rt);
5985 }
5986
5987
AssertFastElements(Register elements)5988 void MacroAssembler::AssertFastElements(Register elements) {
5989 if (emit_debug_code()) {
5990 DCHECK(!elements.is(at));
5991 Label ok;
5992 push(elements);
5993 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5994 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5995 Branch(&ok, eq, elements, Operand(at));
5996 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5997 Branch(&ok, eq, elements, Operand(at));
5998 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5999 Branch(&ok, eq, elements, Operand(at));
6000 Abort(kJSObjectWithFastElementsMapHasSlowElements);
6001 bind(&ok);
6002 pop(elements);
6003 }
6004 }
6005
6006
Check(Condition cc,BailoutReason reason,Register rs,Operand rt)6007 void MacroAssembler::Check(Condition cc, BailoutReason reason,
6008 Register rs, Operand rt) {
6009 Label L;
6010 Branch(&L, cc, rs, rt);
6011 Abort(reason);
6012 // Will not return here.
6013 bind(&L);
6014 }
6015
6016
Abort(BailoutReason reason)6017 void MacroAssembler::Abort(BailoutReason reason) {
6018 Label abort_start;
6019 bind(&abort_start);
6020 #ifdef DEBUG
6021 const char* msg = GetBailoutReason(reason);
6022 if (msg != NULL) {
6023 RecordComment("Abort message: ");
6024 RecordComment(msg);
6025 }
6026
6027 if (FLAG_trap_on_abort) {
6028 stop(msg);
6029 return;
6030 }
6031 #endif
6032
6033 li(a0, Operand(Smi::FromInt(reason)));
6034 push(a0);
6035 // Disable stub call restrictions to always allow calls to abort.
6036 if (!has_frame_) {
6037 // We don't actually want to generate a pile of code for this, so just
6038 // claim there is a stack frame, without generating one.
6039 FrameScope scope(this, StackFrame::NONE);
6040 CallRuntime(Runtime::kAbort);
6041 } else {
6042 CallRuntime(Runtime::kAbort);
6043 }
6044 // Will not return here.
6045 if (is_trampoline_pool_blocked()) {
6046 // If the calling code cares about the exact number of
6047 // instructions generated, we insert padding here to keep the size
6048 // of the Abort macro constant.
6049 // Currently in debug mode with debug_code enabled the number of
6050 // generated instructions is 10, so we use this as a maximum value.
6051 static const int kExpectedAbortInstructions = 10;
6052 int abort_instructions = InstructionsGeneratedSince(&abort_start);
6053 DCHECK(abort_instructions <= kExpectedAbortInstructions);
6054 while (abort_instructions++ < kExpectedAbortInstructions) {
6055 nop();
6056 }
6057 }
6058 }
6059
6060
LoadContext(Register dst,int context_chain_length)6061 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
6062 if (context_chain_length > 0) {
6063 // Move up the chain of contexts to the context containing the slot.
6064 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
6065 for (int i = 1; i < context_chain_length; i++) {
6066 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
6067 }
6068 } else {
6069 // Slot is in the current function context. Move it into the
6070 // destination register in case we store into it (the write barrier
6071 // cannot be allowed to destroy the context in cp).
6072 Move(dst, cp);
6073 }
6074 }
6075
6076
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)6077 void MacroAssembler::LoadTransitionedArrayMapConditional(
6078 ElementsKind expected_kind,
6079 ElementsKind transitioned_kind,
6080 Register map_in_out,
6081 Register scratch,
6082 Label* no_map_match) {
6083 DCHECK(IsFastElementsKind(expected_kind));
6084 DCHECK(IsFastElementsKind(transitioned_kind));
6085
6086 // Check that the function's map is the same as the expected cached map.
6087 ld(scratch, NativeContextMemOperand());
6088 ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
6089 Branch(no_map_match, ne, map_in_out, Operand(at));
6090
6091 // Use the transitioned cached map.
6092 ld(map_in_out,
6093 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
6094 }
6095
6096
LoadNativeContextSlot(int index,Register dst)6097 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
6098 ld(dst, NativeContextMemOperand());
6099 ld(dst, ContextMemOperand(dst, index));
6100 }
6101
6102
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)6103 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
6104 Register map,
6105 Register scratch) {
6106 // Load the initial map. The global functions all have initial maps.
6107 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
6108 if (emit_debug_code()) {
6109 Label ok, fail;
6110 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
6111 Branch(&ok);
6112 bind(&fail);
6113 Abort(kGlobalFunctionsMustHaveInitialMap);
6114 bind(&ok);
6115 }
6116 }
6117
StubPrologue(StackFrame::Type type)6118 void MacroAssembler::StubPrologue(StackFrame::Type type) {
6119 li(at, Operand(Smi::FromInt(type)));
6120 PushCommonFrame(at);
6121 }
6122
6123
Prologue(bool code_pre_aging)6124 void MacroAssembler::Prologue(bool code_pre_aging) {
6125 PredictableCodeSizeScope predictible_code_size_scope(
6126 this, kNoCodeAgeSequenceLength);
6127 // The following three instructions must remain together and unmodified
6128 // for code aging to work properly.
6129 if (code_pre_aging) {
6130 // Pre-age the code.
6131 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
6132 nop(Assembler::CODE_AGE_MARKER_NOP);
6133 // Load the stub address to t9 and call it.
6134 // GetCodeAgeAndParity() extracts the stub address from this instruction.
6135 li(t9,
6136 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
6137 ADDRESS_LOAD);
6138 nop(); // Prevent jalr to jal optimization.
6139 jalr(t9, a0);
6140 nop(); // Branch delay slot nop.
6141 nop(); // Pad the empty space.
6142 } else {
6143 PushStandardFrame(a1);
6144 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6145 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6146 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
6147 }
6148 }
6149
EmitLoadTypeFeedbackVector(Register vector)6150 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
6151 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
6152 ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
6153 ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
6154 }
6155
6156
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)6157 void MacroAssembler::EnterFrame(StackFrame::Type type,
6158 bool load_constant_pool_pointer_reg) {
6159 // Out-of-line constant pool not implemented on mips64.
6160 UNREACHABLE();
6161 }
6162
6163
EnterFrame(StackFrame::Type type)6164 void MacroAssembler::EnterFrame(StackFrame::Type type) {
6165 int stack_offset, fp_offset;
6166 if (type == StackFrame::INTERNAL) {
6167 stack_offset = -4 * kPointerSize;
6168 fp_offset = 2 * kPointerSize;
6169 } else {
6170 stack_offset = -3 * kPointerSize;
6171 fp_offset = 1 * kPointerSize;
6172 }
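  // The frame holds ra, fp, the frame-type Smi and, for INTERNAL frames, the
  // code object; fp is then adjusted to point at the saved fp slot.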
6173 daddiu(sp, sp, stack_offset);
6174 stack_offset = -stack_offset - kPointerSize;
6175 sd(ra, MemOperand(sp, stack_offset));
6176 stack_offset -= kPointerSize;
6177 sd(fp, MemOperand(sp, stack_offset));
6178 stack_offset -= kPointerSize;
6179 li(t9, Operand(Smi::FromInt(type)));
6180 sd(t9, MemOperand(sp, stack_offset));
6181 if (type == StackFrame::INTERNAL) {
6182 DCHECK_EQ(stack_offset, kPointerSize);
6183 li(t9, Operand(CodeObject()));
6184 sd(t9, MemOperand(sp, 0));
6185 } else {
6186 DCHECK_EQ(stack_offset, 0);
6187 }
6188 // Adjust FP to point to saved FP.
6189 Daddu(fp, sp, Operand(fp_offset));
6190 }
6191
6192
LeaveFrame(StackFrame::Type type)6193 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
6194 daddiu(sp, fp, 2 * kPointerSize);
6195 ld(ra, MemOperand(fp, 1 * kPointerSize));
6196 ld(fp, MemOperand(fp, 0 * kPointerSize));
6197 }
6198
EnterExitFrame(bool save_doubles,int stack_space)6199 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
6200 // Set up the frame structure on the stack.
6201 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
6202 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
6203 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
6204
6205 // This is how the stack will look:
6206 // fp + 2 (==kCallerSPDisplacement) - old stack's end
6207 // [fp + 1 (==kCallerPCOffset)] - saved old ra
6208 // [fp + 0 (==kCallerFPOffset)] - saved old fp
6209 // [fp - 1] - StackFrame::EXIT Smi
6210 // [fp - 2 (==kSPOffset)] - sp of the called function
6211 // [fp - 3 (==kCodeOffset)] - CodeObject
6212 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
6213 // new stack (will contain saved ra)
6214
6215 // Save registers and reserve room for saved entry sp and code object.
6216 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
6217 sd(ra, MemOperand(sp, 4 * kPointerSize));
6218 sd(fp, MemOperand(sp, 3 * kPointerSize));
6219 li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
6220 sd(at, MemOperand(sp, 2 * kPointerSize));
6221 // Set up new frame pointer.
6222 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
6223
6224 if (emit_debug_code()) {
6225 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
6226 }
6227
6228 // Accessed from ExitFrame::code_slot.
6229 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
6230 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
6231
6232 // Save the frame pointer and the context in top.
6233 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6234 sd(fp, MemOperand(t8));
6235 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6236 sd(cp, MemOperand(t8));
6237
6238 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
6239 if (save_doubles) {
6240 // The stack is already aligned to an 8-byte boundary for stores with sdc1.
6241 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
6242 int space = kNumOfSavedRegisters * kDoubleSize;
6243 Dsubu(sp, sp, Operand(space));
6244 // Remember: we only need to save every 2nd double FPU value.
6245 for (int i = 0; i < kNumOfSavedRegisters; i++) {
6246 FPURegister reg = FPURegister::from_code(2 * i);
6247 sdc1(reg, MemOperand(sp, i * kDoubleSize));
6248 }
6249 }
6250
6251 // Reserve space for the return address, stack space and an optional slot
6252 // (used by the DirectCEntryStub to hold the return value if a struct is
6253 // returned) and align the frame preparing for calling the runtime function.
6254 DCHECK(stack_space >= 0);
6255 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
6256 if (frame_alignment > 0) {
6257 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6258 And(sp, sp, Operand(-frame_alignment)); // Align stack.
6259 }
6260
6261 // Set the exit frame sp value to point just before the return address
6262 // location.
6263 daddiu(at, sp, kPointerSize);
6264 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
6265 }
6266
6267
LeaveExitFrame(bool save_doubles,Register argument_count,bool restore_context,bool do_return,bool argument_count_is_length)6268 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
6269 bool restore_context, bool do_return,
6270 bool argument_count_is_length) {
6271 // Optionally restore all double registers.
6272 if (save_doubles) {
6273 // Remember: we only need to restore every 2nd double FPU value.
6274 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
6275 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
6276 kNumOfSavedRegisters * kDoubleSize));
6277 for (int i = 0; i < kNumOfSavedRegisters; i++) {
6278 FPURegister reg = FPURegister::from_code(2 * i);
6279 ldc1(reg, MemOperand(t8, i * kDoubleSize));
6280 }
6281 }
6282
6283 // Clear top frame.
6284 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
6285 sd(zero_reg, MemOperand(t8));
6286
6287 // Restore current context from top and clear it in debug mode.
6288 if (restore_context) {
6289 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6290 ld(cp, MemOperand(t8));
6291 }
6292 #ifdef DEBUG
6293 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
6294 sd(a3, MemOperand(t8));
6295 #endif
6296
6297 // Pop the arguments, restore registers, and return.
6298 mov(sp, fp); // Respect ABI stack constraint.
6299 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
6300 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
6301
6302 if (argument_count.is_valid()) {
6303 if (argument_count_is_length) {
6304 daddu(sp, sp, argument_count);
6305 } else {
6306 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
6307 }
6308 }
6309
6310 if (do_return) {
6311 Ret(USE_DELAY_SLOT);
6312 // If returning, the instruction in the delay slot will be the addiu below.
6313 }
6314 daddiu(sp, sp, 2 * kPointerSize);
6315 }
6316
6317
InitializeNewString(Register string,Register length,Heap::RootListIndex map_index,Register scratch1,Register scratch2)6318 void MacroAssembler::InitializeNewString(Register string,
6319 Register length,
6320 Heap::RootListIndex map_index,
6321 Register scratch1,
6322 Register scratch2) {
6323 // dsll(scratch1, length, kSmiTagSize);
6324 dsll32(scratch1, length, 0);
6325 LoadRoot(scratch2, map_index);
6326 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
6327 li(scratch1, Operand(String::kEmptyHashField));
6328 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
6329 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
6330 }
6331
6332
ActivationFrameAlignment()6333 int MacroAssembler::ActivationFrameAlignment() {
6334 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6335 // Running on the real platform. Use the alignment as mandated by the local
6336 // environment.
6337 // Note: This will break if we ever start generating snapshots on one Mips
6338 // platform for another Mips platform with a different alignment.
6339 return base::OS::ActivationFrameAlignment();
6340 #else // V8_HOST_ARCH_MIPS
6341 // If we are using the simulator then we should always align to the expected
6342 // alignment. As the simulator is used to generate snapshots we do not know
6343 // if the target platform will need alignment, so this is controlled from a
6344 // flag.
6345 return FLAG_sim_stack_alignment;
6346 #endif // V8_HOST_ARCH_MIPS
6347 }
6348
6349
AssertStackIsAligned()6350 void MacroAssembler::AssertStackIsAligned() {
6351 if (emit_debug_code()) {
6352 const int frame_alignment = ActivationFrameAlignment();
6353 const int frame_alignment_mask = frame_alignment - 1;
6354
6355 if (frame_alignment > kPointerSize) {
6356 Label alignment_as_expected;
6357 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6358 andi(at, sp, frame_alignment_mask);
6359 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6360 // Don't use Check here, as it will call Runtime_Abort and re-enter this code.
6361 stop("Unexpected stack alignment");
6362 bind(&alignment_as_expected);
6363 }
6364 }
6365 }
6366
6367
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)6368 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
6369 Register reg,
6370 Register scratch,
6371 Label* not_power_of_two_or_zero) {
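  // reg == 0 makes scratch negative and is caught by the first branch below;
  // otherwise reg is a power of two exactly when (reg - 1) & reg == 0.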
6372 Dsubu(scratch, reg, Operand(1));
6373 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
6374 scratch, Operand(zero_reg));
6375 and_(at, scratch, reg); // In the delay slot.
6376 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
6377 }
6378
6379
SmiTagCheckOverflow(Register reg,Register overflow)6380 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
6381 DCHECK(!reg.is(overflow));
6382 mov(overflow, reg); // Save original value.
6383 SmiTag(reg);
6384 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
6385 }
6386
6387
SmiTagCheckOverflow(Register dst,Register src,Register overflow)6388 void MacroAssembler::SmiTagCheckOverflow(Register dst,
6389 Register src,
6390 Register overflow) {
6391 if (dst.is(src)) {
6392 // Fall back to slower case.
6393 SmiTagCheckOverflow(dst, overflow);
6394 } else {
6395 DCHECK(!dst.is(src));
6396 DCHECK(!dst.is(overflow));
6397 DCHECK(!src.is(overflow));
6398 SmiTag(dst, src);
6399 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
6400 }
6401 }
6402
6403
SmiLoadUntag(Register dst,MemOperand src)6404 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
6405 if (SmiValuesAre32Bits()) {
6406 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6407 } else {
6408 lw(dst, src);
6409 SmiUntag(dst);
6410 }
6411 }
6412
6413
SmiLoadScale(Register dst,MemOperand src,int scale)6414 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
6415 if (SmiValuesAre32Bits()) {
6416 // TODO(plind): not clear whether lw or ld is faster here; needs a micro-benchmark.
6417 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
6418 dsll(dst, dst, scale);
6419 } else {
6420 lw(dst, src);
6421 DCHECK(scale >= kSmiTagSize);
6422 sll(dst, dst, scale - kSmiTagSize);
6423 }
6424 }
6425
6426
6427 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
SmiLoadWithScale(Register d_smi,Register d_scaled,MemOperand src,int scale)6428 void MacroAssembler::SmiLoadWithScale(Register d_smi,
6429 Register d_scaled,
6430 MemOperand src,
6431 int scale) {
6432 if (SmiValuesAre32Bits()) {
6433 ld(d_smi, src);
6434 dsra(d_scaled, d_smi, kSmiShift - scale);
6435 } else {
6436 lw(d_smi, src);
6437 DCHECK(scale >= kSmiTagSize);
6438 sll(d_scaled, d_smi, scale - kSmiTagSize);
6439 }
6440 }
6441
6442
6443 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
SmiLoadUntagWithScale(Register d_int,Register d_scaled,MemOperand src,int scale)6444 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
6445 Register d_scaled,
6446 MemOperand src,
6447 int scale) {
6448 if (SmiValuesAre32Bits()) {
6449 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
6450 dsll(d_scaled, d_int, scale);
6451 } else {
6452 lw(d_int, src);
6453 // Need both the int and the scaled int, so use two instructions.
6454 SmiUntag(d_int);
6455 sll(d_scaled, d_int, scale);
6456 }
6457 }
6458
6459
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)6460 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
6461 Register src,
6462 Label* smi_case) {
6463 // DCHECK(!dst.is(src));
6464 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
6465 SmiUntag(dst, src);
6466 }
6467
6468
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)6469 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
6470 Register src,
6471 Label* non_smi_case) {
6472 // DCHECK(!dst.is(src));
6473 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
6474 SmiUntag(dst, src);
6475 }
6476
JumpIfSmi(Register value,Label * smi_label,Register scratch,BranchDelaySlot bd)6477 void MacroAssembler::JumpIfSmi(Register value,
6478 Label* smi_label,
6479 Register scratch,
6480 BranchDelaySlot bd) {
6481 DCHECK_EQ(0, kSmiTag);
6482 andi(scratch, value, kSmiTagMask);
6483 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
6484 }
6485
JumpIfNotSmi(Register value,Label * not_smi_label,Register scratch,BranchDelaySlot bd)6486 void MacroAssembler::JumpIfNotSmi(Register value,
6487 Label* not_smi_label,
6488 Register scratch,
6489 BranchDelaySlot bd) {
6490 DCHECK_EQ(0, kSmiTag);
6491 andi(scratch, value, kSmiTagMask);
6492 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
6493 }
6494
6495
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)6496 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
6497 Register reg2,
6498 Label* on_not_both_smi) {
6499 STATIC_ASSERT(kSmiTag == 0);
6500 // TODO(plind): Find some better way to fix this assert issue.
6501 #if defined(__APPLE__)
6502 DCHECK_EQ(1, kSmiTagMask);
6503 #else
6504 DCHECK_EQ((int64_t)1, kSmiTagMask);
6505 #endif
6506 or_(at, reg1, reg2);
6507 JumpIfNotSmi(at, on_not_both_smi);
6508 }
6509
6510
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)6511 void MacroAssembler::JumpIfEitherSmi(Register reg1,
6512 Register reg2,
6513 Label* on_either_smi) {
6514 STATIC_ASSERT(kSmiTag == 0);
6515 // TODO(plind): Find some better way to fix this assert issue.
6516 #if defined(__APPLE__)
6517 DCHECK_EQ(1, kSmiTagMask);
6518 #else
6519 DCHECK_EQ((int64_t)1, kSmiTagMask);
6520 #endif
6521 // The AND has Smi tag 0 unless both registers have tag 1 (i.e. neither is a Smi).
6522 and_(at, reg1, reg2);
6523 JumpIfSmi(at, on_either_smi);
6524 }
6525
AssertNotNumber(Register object)6526 void MacroAssembler::AssertNotNumber(Register object) {
6527 if (emit_debug_code()) {
6528 STATIC_ASSERT(kSmiTag == 0);
6529 andi(at, object, kSmiTagMask);
6530 Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6531 GetObjectType(object, t8, t8);
6532 Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6533 }
6534 }
6535
AssertNotSmi(Register object)6536 void MacroAssembler::AssertNotSmi(Register object) {
6537 if (emit_debug_code()) {
6538 STATIC_ASSERT(kSmiTag == 0);
6539 andi(at, object, kSmiTagMask);
6540 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6541 }
6542 }
6543
6544
AssertSmi(Register object)6545 void MacroAssembler::AssertSmi(Register object) {
6546 if (emit_debug_code()) {
6547 STATIC_ASSERT(kSmiTag == 0);
6548 andi(at, object, kSmiTagMask);
6549 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6550 }
6551 }
6552
6553
AssertString(Register object)6554 void MacroAssembler::AssertString(Register object) {
6555 if (emit_debug_code()) {
6556 STATIC_ASSERT(kSmiTag == 0);
6557 SmiTst(object, t8);
6558 Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6559 GetObjectType(object, t8, t8);
6560 Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
6561 }
6562 }
6563
6564
AssertName(Register object)6565 void MacroAssembler::AssertName(Register object) {
6566 if (emit_debug_code()) {
6567 STATIC_ASSERT(kSmiTag == 0);
6568 SmiTst(object, t8);
6569 Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6570 GetObjectType(object, t8, t8);
6571 Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6572 }
6573 }
6574
6575
AssertFunction(Register object)6576 void MacroAssembler::AssertFunction(Register object) {
6577 if (emit_debug_code()) {
6578 STATIC_ASSERT(kSmiTag == 0);
6579 SmiTst(object, t8);
6580 Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6581 GetObjectType(object, t8, t8);
6582 Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6583 }
6584 }
6585
6586
AssertBoundFunction(Register object)6587 void MacroAssembler::AssertBoundFunction(Register object) {
6588 if (emit_debug_code()) {
6589 STATIC_ASSERT(kSmiTag == 0);
6590 SmiTst(object, t8);
6591 Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6592 GetObjectType(object, t8, t8);
6593 Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
6594 }
6595 }
6596
AssertGeneratorObject(Register object)6597 void MacroAssembler::AssertGeneratorObject(Register object) {
6598 if (emit_debug_code()) {
6599 STATIC_ASSERT(kSmiTag == 0);
6600 SmiTst(object, t8);
6601 Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
6602 GetObjectType(object, t8, t8);
6603 Check(eq, kOperandIsNotAGeneratorObject, t8,
6604 Operand(JS_GENERATOR_OBJECT_TYPE));
6605 }
6606 }
6607
AssertReceiver(Register object)6608 void MacroAssembler::AssertReceiver(Register object) {
6609 if (emit_debug_code()) {
6610 STATIC_ASSERT(kSmiTag == 0);
6611 SmiTst(object, t8);
6612 Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6613 GetObjectType(object, t8, t8);
6614 Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6615 }
6616 }
6617
6618
AssertUndefinedOrAllocationSite(Register object,Register scratch)6619 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6620 Register scratch) {
6621 if (emit_debug_code()) {
6622 Label done_checking;
6623 AssertNotSmi(object);
6624 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6625 Branch(&done_checking, eq, object, Operand(scratch));
6626 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
6627 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
6628 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
6629 bind(&done_checking);
6630 }
6631 }
6632
6633
AssertIsRoot(Register reg,Heap::RootListIndex index)6634 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6635 if (emit_debug_code()) {
6636 DCHECK(!reg.is(at));
6637 LoadRoot(at, index);
6638 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6639 }
6640 }
6641
6642
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)6643 void MacroAssembler::JumpIfNotHeapNumber(Register object,
6644 Register heap_number_map,
6645 Register scratch,
6646 Label* on_not_heap_number) {
6647 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6648 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6649 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6650 }
6651
6652
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)6653 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6654 Register first, Register second, Register scratch1, Register scratch2,
6655 Label* failure) {
6656 // Test that both first and second are sequential one-byte strings.
6657 // Assume that they are non-smis.
6658 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6659 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6660 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6661 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6662
6663 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6664 scratch2, failure);
6665 }
6666
6667
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)6668 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6669 Register second,
6670 Register scratch1,
6671 Register scratch2,
6672 Label* failure) {
6673 // Check that neither is a smi.
6674 STATIC_ASSERT(kSmiTag == 0);
6675 And(scratch1, first, Operand(second));
6676 JumpIfSmi(scratch1, failure);
6677 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6678 scratch2, failure);
6679 }
6680
6681
JumpIfBothInstanceTypesAreNotSequentialOneByte(Register first,Register second,Register scratch1,Register scratch2,Label * failure)6682 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6683 Register first, Register second, Register scratch1, Register scratch2,
6684 Label* failure) {
6685 const int kFlatOneByteStringMask =
6686 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6687 const int kFlatOneByteStringTag =
6688 kStringTag | kOneByteStringTag | kSeqStringTag;
6689 DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
6690 andi(scratch1, first, kFlatOneByteStringMask);
6691 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6692 andi(scratch2, second, kFlatOneByteStringMask);
6693 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6694 }
6695
6696
JumpIfInstanceTypeIsNotSequentialOneByte(Register type,Register scratch,Label * failure)6697 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
6698 Register scratch,
6699 Label* failure) {
6700 const int kFlatOneByteStringMask =
6701 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6702 const int kFlatOneByteStringTag =
6703 kStringTag | kOneByteStringTag | kSeqStringTag;
6704 And(scratch, type, Operand(kFlatOneByteStringMask));
6705 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
6706 }
6707
6708 static const int kRegisterPassedArguments = 8;
6709
CalculateStackPassedWords(int num_reg_arguments,int num_double_arguments)6710 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6711 int num_double_arguments) {
6712 int stack_passed_words = 0;
6713 num_reg_arguments += 2 * num_double_arguments;
6714
6715 // O32: Up to four simple arguments are passed in registers a0..a3.
6716 // N64: Up to eight simple arguments are passed in registers a0..a7.
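  // For example, 3 integer and 4 double arguments count as 11 register-class
  // arguments here, so 11 - 8 = 3 words spill to the stack, plus whatever
  // argument-slot space kCArgSlotCount mandates for the ABI in use.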
6717 if (num_reg_arguments > kRegisterPassedArguments) {
6718 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6719 }
6720 stack_passed_words += kCArgSlotCount;
6721 return stack_passed_words;
6722 }
6723
6724
EmitSeqStringSetCharCheck(Register string,Register index,Register value,Register scratch,uint32_t encoding_mask)6725 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6726 Register index,
6727 Register value,
6728 Register scratch,
6729 uint32_t encoding_mask) {
6730 Label is_object;
6731 SmiTst(string, at);
6732 Check(ne, kNonObject, at, Operand(zero_reg));
6733
6734 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
6735 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6736
6737 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6738 li(scratch, Operand(encoding_mask));
6739 Check(eq, kUnexpectedStringType, at, Operand(scratch));
6740
6741 // TODO(plind): requires Smi size check code for mips32.
6742
6743 ld(at, FieldMemOperand(string, String::kLengthOffset));
6744 Check(lt, kIndexIsTooLarge, index, Operand(at));
6745
6746 DCHECK(Smi::FromInt(0) == 0);
6747 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6748 }
6749
6750
6751 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6752 int num_double_arguments,
6753 Register scratch) {
6754 int frame_alignment = ActivationFrameAlignment();
6755
6756   // n64: Up to eight simple arguments are passed in registers a0..a7. No argument slots.
6757 // O32: Up to four simple arguments are passed in registers a0..a3.
6758 // Those four arguments must have reserved argument slots on the stack for
6759 // mips, even though those argument slots are not normally used.
6760 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6761 // address than) the (O32) argument slots. (arg slot calculation handled by
6762 // CalculateStackPassedWords()).
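  // For example (hypothetical values): with frame_alignment == 16 and one
  // stack-passed word, sp is lowered by two words, rounded down to a
  // 16-byte boundary, and the original sp is saved in the highest reserved
  // slot so CallCFunctionHelper can restore it after the call.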
6763 int stack_passed_arguments = CalculateStackPassedWords(
6764 num_reg_arguments, num_double_arguments);
6765 if (frame_alignment > kPointerSize) {
6766     // Make stack end at alignment and make room for stack_passed_arguments words
6767 // and the original value of sp.
6768 mov(scratch, sp);
6769 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6770 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6771 And(sp, sp, Operand(-frame_alignment));
6772 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6773 } else {
6774 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6775 }
6776 }
6777
6778
6779 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6780 Register scratch) {
6781 PrepareCallCFunction(num_reg_arguments, 0, scratch);
6782 }
6783
6784
6785 void MacroAssembler::CallCFunction(ExternalReference function,
6786 int num_reg_arguments,
6787 int num_double_arguments) {
6788 li(t8, Operand(function));
6789 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6790 }
6791
6792
6793 void MacroAssembler::CallCFunction(Register function,
6794 int num_reg_arguments,
6795 int num_double_arguments) {
6796 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6797 }
6798
6799
6800 void MacroAssembler::CallCFunction(ExternalReference function,
6801 int num_arguments) {
6802 CallCFunction(function, num_arguments, 0);
6803 }
6804
6805
6806 void MacroAssembler::CallCFunction(Register function,
6807 int num_arguments) {
6808 CallCFunction(function, num_arguments, 0);
6809 }
6810
6811
6812 void MacroAssembler::CallCFunctionHelper(Register function,
6813 int num_reg_arguments,
6814 int num_double_arguments) {
6815 DCHECK(has_frame());
6816 // Make sure that the stack is aligned before calling a C function unless
6817 // running in the simulator. The simulator has its own alignment check which
6818 // provides more information.
6819   // The argument slots are presumed to have been set up by
6820 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
6821
6822 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6823 if (emit_debug_code()) {
6824 int frame_alignment = base::OS::ActivationFrameAlignment();
6825 int frame_alignment_mask = frame_alignment - 1;
6826 if (frame_alignment > kPointerSize) {
6827 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6828 Label alignment_as_expected;
6829 And(at, sp, Operand(frame_alignment_mask));
6830 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6831 // Don't use Check here, as it will call Runtime_Abort possibly
6832 // re-entering here.
6833 stop("Unexpected alignment in CallCFunction");
6834 bind(&alignment_as_expected);
6835 }
6836 }
6837 #endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6838
6839 // Just call directly. The function called cannot cause a GC, or
6840 // allow preemption, so the return address in the link register
6841 // stays correct.
6842
6843 if (!function.is(t9)) {
6844 mov(t9, function);
6845 function = t9;
6846 }
6847
6848 Call(function);
6849
6850 int stack_passed_arguments = CalculateStackPassedWords(
6851 num_reg_arguments, num_double_arguments);
6852
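  // Undo the adjustment made by PrepareCallCFunction: when the frame was
  // over-aligned, the original sp was saved at the top of the reserved
  // area and is reloaded here; otherwise the argument words are simply
  // popped.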
6853 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6854 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6855 } else {
6856 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6857 }
6858 }
6859
6860
6861 #undef BRANCH_ARGS_CHECK
6862
6863
6864 void MacroAssembler::CheckPageFlag(
6865 Register object,
6866 Register scratch,
6867 int mask,
6868 Condition cc,
6869 Label* condition_met) {
6870 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6871 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6872 And(scratch, scratch, Operand(mask));
6873 Branch(condition_met, cc, scratch, Operand(zero_reg));
6874 }
6875
6876
6877 void MacroAssembler::JumpIfBlack(Register object,
6878 Register scratch0,
6879 Register scratch1,
6880 Label* on_black) {
6881 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
6882 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6883 }
6884
6885
6886 void MacroAssembler::HasColor(Register object,
6887 Register bitmap_scratch,
6888 Register mask_scratch,
6889 Label* has_color,
6890 int first_bit,
6891 int second_bit) {
6892 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6893 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6894
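  // first_bit/second_bit select the two-bit mark pattern being tested; e.g.
  // JumpIfBlack passes (1, 1), so has_color is only reached when both mark
  // bits are set (Marking::kBlackBitPattern, "11").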
6895 GetMarkBits(object, bitmap_scratch, mask_scratch);
6896
6897 Label other_color;
6898 // Note that we are using two 4-byte aligned loads.
6899 LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6900 And(t8, t9, Operand(mask_scratch));
6901 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6902 // Shift left 1 by adding.
6903 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
6904 And(t8, t9, Operand(mask_scratch));
6905 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6906
6907 bind(&other_color);
6908 }
6909
6910
6911 void MacroAssembler::GetMarkBits(Register addr_reg,
6912 Register bitmap_reg,
6913 Register mask_reg) {
6914 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
6915 // addr_reg is divided into fields:
6916 // |63 page base 20|19 high 8|7 shift 3|2 0|
6917 // 'high' gives the index of the cell holding color bits for the object.
6918 // 'shift' gives the offset in the cell for this object's color.
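  // Worked example (hypothetical page offset, for illustration only): for an
  // object at page_base + 0x2A48, bits [19:8] give cell index 0x2A, so
  // bitmap_reg ends up at page_base + 0x2A * Bitmap::kBytesPerCell, while
  // bits [7:3] give bit index 9, so mask_reg becomes 1 << 9.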
6919 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6920 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6921 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6922 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
6923 Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
6924 li(t8, Operand(1));
6925 dsllv(mask_reg, t8, mask_reg);
6926 }
6927
6928
6929 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6930 Register mask_scratch, Register load_scratch,
6931 Label* value_is_white) {
6932 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6933 GetMarkBits(value, bitmap_scratch, mask_scratch);
6934
6935 // If the value is black or grey we don't need to do anything.
6936 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
6937 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6938 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
6939 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6940
6941 // Since both black and grey have a 1 in the first position and white does
6942 // not have a 1 there we only need to check one bit.
6943 // Note that we are using a 4-byte aligned 8-byte load.
6944 if (emit_debug_code()) {
6945 LoadWordPair(load_scratch,
6946 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6947 } else {
6948 lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6949 }
6950 And(t8, mask_scratch, load_scratch);
6951 Branch(value_is_white, eq, t8, Operand(zero_reg));
6952 }
6953
6954
6955 void MacroAssembler::LoadInstanceDescriptors(Register map,
6956 Register descriptors) {
6957 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6958 }
6959
6960
6961 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
6962 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6963 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6964 }
6965
6966
6967 void MacroAssembler::EnumLength(Register dst, Register map) {
6968 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
6969 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6970 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6971 SmiTag(dst);
6972 }
6973
6974
6975 void MacroAssembler::LoadAccessor(Register dst, Register holder,
6976 int accessor_index,
6977 AccessorComponent accessor) {
6978 ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6979 LoadInstanceDescriptors(dst, dst);
6980 ld(dst,
6981 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6982 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6983 : AccessorPair::kSetterOffset;
6984 ld(dst, FieldMemOperand(dst, offset));
6985 }
6986
6987
6988 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
6989 Register null_value = a5;
6990 Register empty_fixed_array_value = a6;
6991 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6992 Label next, start;
6993 mov(a2, a0);
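  // (The receiver is expected in a0; a2 walks the prototype chain from it.)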
6994
6995 // Check if the enum length field is properly initialized, indicating that
6996 // there is an enum cache.
6997 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6998
6999 EnumLength(a3, a1);
7000 Branch(
7001 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
7002
7003 LoadRoot(null_value, Heap::kNullValueRootIndex);
7004 jmp(&start);
7005
7006 bind(&next);
7007 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
7008
7009 // For all objects but the receiver, check that the cache is empty.
7010 EnumLength(a3, a1);
7011 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
7012
7013 bind(&start);
7014
7015 // Check that there are no elements. Register a2 contains the current JS
7016 // object we've reached through the prototype chain.
7017 Label no_elements;
7018 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
7019 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
7020
7021 // Second chance, the object may be using the empty slow element dictionary.
7022 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
7023 Branch(call_runtime, ne, a2, Operand(at));
7024
7025 bind(&no_elements);
7026 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
7027 Branch(&next, ne, a2, Operand(null_value));
7028 }
7029
7030
7031 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
7032 DCHECK(!output_reg.is(input_reg));
7033 Label done;
7034 li(output_reg, Operand(255));
7035 // Normal branch: nop in delay slot.
7036 Branch(&done, gt, input_reg, Operand(output_reg));
7037 // Use delay slot in this branch.
7038 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
7039 mov(output_reg, zero_reg); // In delay slot.
7040 mov(output_reg, input_reg); // Value is in range 0..255.
7041 bind(&done);
7042 }
7043
7044
7045 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
7046 DoubleRegister input_reg,
7047 DoubleRegister temp_double_reg) {
7048 Label above_zero;
7049 Label done;
7050 Label in_bounds;
7051
7052 Move(temp_double_reg, 0.0);
7053 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
7054
7055 // Double value is less than zero, NaN or Inf, return 0.
7056 mov(result_reg, zero_reg);
7057 Branch(&done);
7058
7059 // Double value is >= 255, return 255.
7060 bind(&above_zero);
7061 Move(temp_double_reg, 255.0);
7062 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
7063 li(result_reg, Operand(255));
7064 Branch(&done);
7065
7066 // In 0-255 range, round and truncate.
7067 bind(&in_bounds);
7068 cvt_w_d(temp_double_reg, input_reg);
7069 mfc1(result_reg, temp_double_reg);
7070 bind(&done);
7071 }
7072
7073 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
7074 Register scratch_reg,
7075 Label* no_memento_found) {
7076 Label map_check;
7077 Label top_check;
7078 ExternalReference new_space_allocation_top_adr =
7079 ExternalReference::new_space_allocation_top_address(isolate());
7080 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
7081 const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
7082
7083 // Bail out if the object is not in new space.
7084 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
7085 // If the object is in new space, we need to check whether it is on the same
7086 // page as the current top.
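  // Two addresses lie on the same page exactly when they agree on all bits
  // above the page-alignment bits, which is what the Xor/And sequence below
  // tests.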
7087 Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
7088 li(at, Operand(new_space_allocation_top_adr));
7089 ld(at, MemOperand(at));
7090 Xor(scratch_reg, scratch_reg, Operand(at));
7091 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
7092 Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
7093 // The object is on a different page than allocation top. Bail out if the
7094 // object sits on the page boundary as no memento can follow and we cannot
7095 // touch the memory following it.
7096 Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
7097 Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
7098 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
7099 Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
7100 // Continue with the actual map check.
7101 jmp(&map_check);
7102 // If top is on the same page as the current object, we need to check whether
7103 // we are below top.
7104 bind(&top_check);
7105 Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
7106 li(at, Operand(new_space_allocation_top_adr));
7107 ld(at, MemOperand(at));
7108 Branch(no_memento_found, gt, scratch_reg, Operand(at));
7109 // Memento map check.
7110 bind(&map_check);
7111 ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
7112 Branch(no_memento_found, ne, scratch_reg,
7113 Operand(isolate()->factory()->allocation_memento_map()));
7114 }
7115
7116
7117 Register GetRegisterThatIsNotOneOf(Register reg1,
7118 Register reg2,
7119 Register reg3,
7120 Register reg4,
7121 Register reg5,
7122 Register reg6) {
7123 RegList regs = 0;
7124 if (reg1.is_valid()) regs |= reg1.bit();
7125 if (reg2.is_valid()) regs |= reg2.bit();
7126 if (reg3.is_valid()) regs |= reg3.bit();
7127 if (reg4.is_valid()) regs |= reg4.bit();
7128 if (reg5.is_valid()) regs |= reg5.bit();
7129 if (reg6.is_valid()) regs |= reg6.bit();
7130
7131 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
7132 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
7133 int code = config->GetAllocatableGeneralCode(i);
7134 Register candidate = Register::from_code(code);
7135 if (regs & candidate.bit()) continue;
7136 return candidate;
7137 }
7138 UNREACHABLE();
7139 return no_reg;
7140 }
7141
7142
7143 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
7144 Register object,
7145 Register scratch0,
7146 Register scratch1,
7147 Label* found) {
7148 DCHECK(!scratch1.is(scratch0));
7149 Factory* factory = isolate()->factory();
7150 Register current = scratch0;
7151 Label loop_again, end;
7152
7153   // current (scratch0) starts at the object and walks the prototype chain.
7154 Move(current, object);
7155 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
7156 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
7157 Branch(&end, eq, current, Operand(factory->null_value()));
7158
7159 // Loop based on the map going up the prototype chain.
7160 bind(&loop_again);
7161 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
7162 lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
7163 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
7164 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
7165 Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
7166 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
7167 DecodeField<Map::ElementsKindBits>(scratch1);
7168 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
7169 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
7170 Branch(&loop_again, ne, current, Operand(factory->null_value()));
7171
7172 bind(&end);
7173 }
7174
7175
7176 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
7177 Register reg5, Register reg6, Register reg7, Register reg8,
7178 Register reg9, Register reg10) {
7179 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
7180 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
7181 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
7182 reg10.is_valid();
7183
7184 RegList regs = 0;
7185 if (reg1.is_valid()) regs |= reg1.bit();
7186 if (reg2.is_valid()) regs |= reg2.bit();
7187 if (reg3.is_valid()) regs |= reg3.bit();
7188 if (reg4.is_valid()) regs |= reg4.bit();
7189 if (reg5.is_valid()) regs |= reg5.bit();
7190 if (reg6.is_valid()) regs |= reg6.bit();
7191 if (reg7.is_valid()) regs |= reg7.bit();
7192 if (reg8.is_valid()) regs |= reg8.bit();
7193 if (reg9.is_valid()) regs |= reg9.bit();
7194 if (reg10.is_valid()) regs |= reg10.bit();
7195 int n_of_non_aliasing_regs = NumRegs(regs);
7196
7197 return n_of_valid_regs != n_of_non_aliasing_regs;
7198 }
7199
7200
7201 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
7202 FlushICache flush_cache)
7203 : address_(address),
7204 size_(instructions * Assembler::kInstrSize),
7205 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
7206 flush_cache_(flush_cache) {
7207 // Create a new macro assembler pointing to the address of the code to patch.
7208   // The size is adjusted with kGap in order for the assembler to generate size
7209 // bytes of instructions without failing with buffer size constraints.
7210 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
7211 }
7212
7213
7214 CodePatcher::~CodePatcher() {
7215 // Indicate that code has changed.
7216 if (flush_cache_ == FLUSH) {
7217 Assembler::FlushICache(masm_.isolate(), address_, size_);
7218 }
7219 // Check that the code was patched as expected.
7220 DCHECK(masm_.pc_ == address_ + size_);
7221 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
7222 }
7223
7224
7225 void CodePatcher::Emit(Instr instr) {
7226 masm()->emit(instr);
7227 }
7228
7229
7230 void CodePatcher::Emit(Address addr) {
7231 // masm()->emit(reinterpret_cast<Instr>(addr));
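  // Left as a no-op here: a 64-bit Address does not fit in a single 32-bit
  // Instr on mips64.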
7232 }
7233
7234
7235 void CodePatcher::ChangeBranchCondition(Instr current_instr,
7236 uint32_t new_opcode) {
7237 current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
7238 masm_.emit(current_instr);
7239 }
7240
7241
7242 void MacroAssembler::TruncatingDiv(Register result,
7243 Register dividend,
7244 int32_t divisor) {
7245 DCHECK(!dividend.is(result));
7246 DCHECK(!dividend.is(at));
7247 DCHECK(!result.is(at));
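  // 32-bit division by a constant via multiply-high and shift (see
  // base::SignedDivisionByConstant). Illustrative constants (for explanation
  // only, not computed here): for divisor 7 the multiplier is 0x92492493 with
  // shift 2; the multiplier's sign bit is set, so the dividend is added back
  // before the arithmetic shift, and the final srl/Addu adds one for negative
  // dividends so the quotient truncates toward zero.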
7248 base::MagicNumbersForDivision<uint32_t> mag =
7249 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
7250 li(at, Operand(static_cast<int32_t>(mag.multiplier)));
7251 Mulh(result, dividend, Operand(at));
7252 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
7253 if (divisor > 0 && neg) {
7254 Addu(result, result, Operand(dividend));
7255 }
7256 if (divisor < 0 && !neg && mag.multiplier > 0) {
7257 Subu(result, result, Operand(dividend));
7258 }
7259 if (mag.shift > 0) sra(result, result, mag.shift);
7260 srl(at, dividend, 31);
7261 Addu(result, result, Operand(at));
7262 }
7263
7264
7265 } // namespace internal
7266 } // namespace v8
7267
7268 #endif // V8_TARGET_ARCH_MIPS64
7269