// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/ppc/macro-assembler-ppc.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


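// PPC has no direct register-indirect branch instruction; indirect jumps and
// calls move the target address into the count register (CTR) with mtctr and
// branch through it with bctr (or bctrl, which also sets the link register).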
void MacroAssembler::Jump(Register target) {
  mtctr(target);
  bctr();
}


void MacroAssembler::JumpToJSEntry(Register target) {
  Move(ip, target);
  Jump(ip);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip, cr);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctr();

  bind(&skip);
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                          CRegister cr) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated PPC code.
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


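// A call through a register is always exactly two instructions
// (mtctr + bctrl), so its size is statically known.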
int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }


void MacroAssembler::Call(Register target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);

  // Branch via the count register and set the LK bit for the return point.
  mtctr(target);
  bctrl();

  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::CallJSEntry(Register target) {
  DCHECK(target.is(ip));
  Call(target);
}


int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond) {
  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
  return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}


int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}


void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(cond == al);

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the
  // same constant pool availability (e.g., whether the constant pool is full
  // or not).
  int expected_size = CallSize(target, rmode, cond);
  Label start;
  bind(&start);
#endif
  // This can likely be optimized to make use of bc() with a 24-bit relative
  // offset, e.g.:
  //
  //   RecordRelocInfo(x.rmode_, x.imm_);
  //   bc( BA, .... offset, LKset);
  //

  mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
  mtctr(ip);
  bctrl();

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id, Condition cond) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id, Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(RelocInfo::IsCodeTarget(rmode));

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the
  // same constant pool availability (e.g., whether the constant pool is full
  // or not).
  int expected_size = CallSize(code, rmode, ast_id, cond);
  Label start;
  bind(&start);
#endif

  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  AllowDeferredHandleDereference using_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::Drop(int count) {
  if (count > 0) {
    Add(sp, sp, count * kPointerSize, r0);
  }
}

void MacroAssembler::Drop(Register count, Register scratch) {
  ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
  add(sp, sp, scratch);
}

void MacroAssembler::Call(Label* target) { b(target, SetLK); }


void MacroAssembler::Push(Handle<Object> handle) {
  mov(r0, Operand(handle));
  push(r0);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  DCHECK(cond == al);
  if (!dst.is(src)) {
    mr(dst, src);
  }
}


void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (!dst.is(src)) {
    fmr(dst, src);
  }
}


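// MultiPush/MultiPop transfer a whole RegList with a single adjustment of
// 'location' (normally sp). Registers are stored from the highest encoding
// downwards, so the lowest-numbered register ends up at the lowest address.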
void MacroAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}


void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) {
  int16_t num_to_push = NumberOfBitsSet(dregs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      stack_offset -= kDoubleSize;
      stfd(dreg, MemOperand(location, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      lfd(dreg, MemOperand(location, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition cond) {
  DCHECK(cond == al);
  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
                               Condition cond) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  DCHECK(cond == al);
  StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}


void MacroAssembler::InNewSpace(Register object, Register scratch,
                                Condition cond, Label* branch) {
  DCHECK(cond == eq || cond == ne);
  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object, int offset, Register value, Register dst,
    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(dst, object, offset - kHeapObjectTag, r0);
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK, pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, map, dst, ip.  The
// register 'object' contains a heap object pointer.
void MacroAssembler::RecordWriteForMap(Register object, Register map,
                                       Register dst,
                                       LinkRegisterStatus lr_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
    cmp(ip, map);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  Label done;

  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set.  This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);

  addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(
    Register object, Register address, Register value,
    LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!object.is(value));
  if (emit_debug_code()) {
    LoadP(r0, MemOperand(address));
    cmp(r0, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value,  // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  }
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;

  // Since a code entry (value) is always in old space, we don't need to update
  // remembered set. If incremental marking is off, there is nothing for us to
  // do.
  if (!FLAG_incremental_marking) return;

  DCHECK(js_function.is(r4));
  DCHECK(code_entry.is(r7));
  DCHECK(scratch.is(r8));
  AssertNotSmi(js_function);

  if (emit_debug_code()) {
    addi(scratch, js_function, Operand(offset - kHeapObjectTag));
    LoadP(ip, MemOperand(scratch));
    cmp(ip, code_entry);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;

  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  const Register dst = scratch;
  addi(dst, js_function, Operand(offset - kHeapObjectTag));

  // Save caller-saved registers.  js_function and code_entry are in the
  // caller-saved register list.
  DCHECK(kJSCallerSaved & js_function.bit());
  DCHECK(kJSCallerSaved & code_entry.bit());
  mflr(r0);
  MultiPush(kJSCallerSaved | r0.bit());

  int argument_count = 3;
  PrepareCallCFunction(argument_count, code_entry);

  mr(r3, js_function);
  mr(r4, dst);
  mov(r5, Operand(ExternalReference::isolate_address(isolate())));

  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }

  // Restore caller-saved registers (including js_function and code_entry).
  MultiPop(kJSCallerSaved | r0.bit());
  mtlr(r0);

  bind(&done);
}

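// The store buffer records addresses of slots that may hold old-to-new
// pointers. The TestBitMask check below detects when the buffer top has
// reached the end of the buffer, in which case the StoreBufferOverflowStub is
// called to process it.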
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address, Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  LoadP(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  StoreP(address, MemOperand(scratch));
  addi(scratch, scratch, Operand(kPointerSize));
  // Write back new top of buffer.
  StoreP(scratch, MemOperand(ip));
  // Check for end of buffer; call the stub when it is reached.
  TestBitMask(scratch, StoreBuffer::kStoreBufferMask, r0);

  if (and_then == kFallThroughAtEnd) {
    bne(&done, cr0);
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(ne, cr0);
  }
  mflr(r0);
  push(r0);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(r0);
  mtlr(r0);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

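// Builds the common frame header: saved LR, saved FP, optionally the constant
// pool pointer, and an optional frame marker. On exit fp points at the
// saved-FP slot, fp_delta slots above the new sp.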
void MacroAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (marker_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, marker_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, kConstantPoolRegister);
      fp_delta = 1;
    }
  } else {
    if (marker_reg.is_valid()) {
      Push(r0, fp, marker_reg);
      fp_delta = 1;
    } else {
      Push(r0, fp);
      fp_delta = 0;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void MacroAssembler::PopCommonFrame(Register marker_reg) {
  if (FLAG_enable_embedded_constant_pool) {
    if (marker_reg.is_valid()) {
      Pop(r0, fp, kConstantPoolRegister, marker_reg);
    } else {
      Pop(r0, fp, kConstantPoolRegister);
    }
  } else {
    if (marker_reg.is_valid()) {
      Pop(r0, fp, marker_reg);
    } else {
      Pop(r0, fp);
    }
  }
  mtlr(r0);
}

void MacroAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (function_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, cp, function_reg);
      fp_delta = 3;
    } else {
      Push(r0, fp, kConstantPoolRegister, cp);
      fp_delta = 2;
    }
  } else {
    if (function_reg.is_valid()) {
      Push(r0, fp, cp, function_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, cp);
      fp_delta = 1;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void MacroAssembler::RestoreFrameStateForTailCall() {
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    set_constant_pool_available(false);
  }
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  mtlr(r0);
}

const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
    Register::kNumAllocatable;

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    subi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    addi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  StoreP(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  LoadP(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  RegList regs = kSafepointSavedRegisters;
  int index = 0;

  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);

  for (int16_t i = 0; i < reg_code; i++) {
    if ((regs & (1 << i)) != 0) {
      index++;
    }
  }

  return index;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


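// Any floating-point arithmetic operation on a signaling NaN yields the
// corresponding quiet NaN, so subtracting zero canonicalizes NaNs while
// leaving all other values unchanged.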
void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN.
  fsub(dst, src, kDoubleRegZero);
}

void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
                                                DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
                                               DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertInt64ToDouble(Register src,
                                          DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfid(double_dst, double_dst);
}


void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
                                                 DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidus(double_dst, double_dst);
}


void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
                                                  DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidu(double_dst, double_dst);
}


void MacroAssembler::ConvertInt64ToFloat(Register src,
                                         DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfids(double_dst, double_dst);
}
#endif


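// fctidz rounds toward zero directly; any other rounding mode requires
// temporarily switching the FPSCR rounding mode around fctid/fctidu.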
void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                                          const Register dst_hi,
#endif
                                          const Register dst,
                                          const DoubleRegister double_dst,
                                          FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctidz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctid(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      dst_hi,
#endif
      dst, double_dst);
}

#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertDoubleToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctiduz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctidu(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(dst, double_dst);
}
#endif

#if !V8_TARGET_ARCH_PPC64
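// On 32-bit PPC a 64-bit value lives in a (low, high) register pair, so the
// 64-bit shifts below are synthesized from 32-bit shifts: a shift amount of
// 32 or more simply moves bits between the halves, while smaller amounts
// combine a shift of each half with the bits carried across the boundary.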
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1f));
  slw(dst_high, src_low, scratch);
  li(dst_low, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  slw(dst_high, src_high, shift);
  srw(scratch, src_low, scratch);
  orx(dst_high, dst_high, scratch);
  slw(dst_low, src_low, shift);
  bind(&done);
}

void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_high, src_low);
    li(dst_low, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1f;
    slwi(dst_high, src_low, Operand(shift));
    li(dst_low, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    slwi(dst_high, src_high, Operand(shift));
    rlwimi(dst_high, src_low, shift, 32 - shift, 31);
    slwi(dst_low, src_low, Operand(shift));
  }
}

void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1f));
  srw(dst_low, src_high, scratch);
  li(dst_high, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  srw(dst_high, src_high, shift);
  bind(&done);
}

void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    li(dst_high, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1f;
    srwi(dst_low, src_high, Operand(shift));
    li(dst_high, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srwi(dst_high, src_high, Operand(shift));
  }
}

void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1f));
  sraw(dst_low, src_high, scratch);
  srawi(dst_high, src_high, 31);
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  sraw(dst_high, src_high, shift);
  bind(&done);
}

void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    srawi(dst_high, src_high, 31);
  } else if (shift > 32) {
    shift &= 0x1f;
    srawi(dst_low, src_high, shift);
    srawi(dst_high, src_high, 31);
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srawi(dst_high, src_high, shift);
  }
}
#endif

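// With embedded constant pools, the Code header stores the pool's offset
// relative to the code's instruction start; adding that offset to the code
// target address yields the constant pool pointer.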
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    Register code_target_address) {
  lwz(kConstantPoolRegister,
      MemOperand(code_target_address,
                 Code::kConstantPoolOffset - Code::kHeaderSize));
  add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}


void MacroAssembler::LoadConstantPoolPointerRegister(Register base,
                                                     int code_start_delta) {
  add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(),
                   code_start_delta);
}


void MacroAssembler::LoadConstantPoolPointerRegister() {
  mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
}

void MacroAssembler::StubPrologue(StackFrame::Type type, Register base,
                                  int prologue_offset) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    LoadSmiLiteral(r11, Smi::FromInt(type));
    PushCommonFrame(r11);
  }
  if (FLAG_enable_embedded_constant_pool) {
    if (!base.is(no_reg)) {
      // base contains the prologue address
      LoadConstantPoolPointerRegister(base, -prologue_offset);
    } else {
      LoadConstantPoolPointerRegister();
    }
    set_constant_pool_available(true);
  }
}


void MacroAssembler::Prologue(bool code_pre_aging, Register base,
                              int prologue_offset) {
  DCHECK(!base.is(no_reg));
  {
    PredictableCodeSizeScope predictible_code_size_scope(
        this, kNoCodeAgeSequenceLength);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
    // The following instructions must remain together and unmodified
    // for code aging to work properly.
    if (code_pre_aging) {
      // Pre-age the code.
      // This matches the code found in PatchPlatformCodeAge().
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
      intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
      // Don't use Call -- we need to preserve ip and lr.
      nop();  // marker to detect sequence (see IsOld)
      mov(r3, Operand(target));
      Jump(r3);
      for (int i = 0; i < kCodeAgingSequenceNops; i++) {
        nop();
      }
    } else {
      // This matches the code found in GetNoCodeAgeSequence().
      PushStandardFrame(r4);
      for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
        nop();
      }
    }
  }
  if (FLAG_enable_embedded_constant_pool) {
    // base contains the prologue address
    LoadConstantPoolPointerRegister(base, -prologue_offset);
    set_constant_pool_available(true);
  }
}


void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
  LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
  LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}


void MacroAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
    // Push type explicitly so we can leverage the constant pool.
    // This path cannot rely on ip containing the code entry.
    PushCommonFrame();
    LoadConstantPoolPointerRegister();
    LoadSmiLiteral(ip, Smi::FromInt(type));
    push(ip);
  } else {
    LoadSmiLiteral(ip, Smi::FromInt(type));
    PushCommonFrame(ip);
  }
  if (type == StackFrame::INTERNAL) {
    mov(r0, Operand(CodeObject()));
    push(r0);
  }
}


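// Returns the pc offset at which the frame has been fully torn down; callers
// can use it for bookkeeping (e.g. recording the code range in which no frame
// exists).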
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // r3: preserved
  // r4: preserved
  // r5: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller's state.
  int frame_ends;
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  }
  mtlr(r0);
  frame_ends = pc_offset();
  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
  mr(fp, ip);
  return frame_ends;
}

void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
                                       Register argc) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (target.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, context, target);
      fp_delta = 3;
    } else {
      Push(r0, fp, kConstantPoolRegister, context);
      fp_delta = 2;
    }
  } else {
    if (target.is_valid()) {
      Push(r0, fp, context, target);
      fp_delta = 2;
    } else {
      Push(r0, fp, context);
      fp_delta = 1;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
  Push(argc);
}

void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
                                       Register argc) {
  Pop(argc);
  if (FLAG_enable_embedded_constant_pool) {
    if (target.is_valid()) {
      Pop(r0, fp, kConstantPoolRegister, context, target);
    } else {
      Pop(r0, fp, kConstantPoolRegister, context);
    }
  } else {
    if (target.is_valid()) {
      Pop(r0, fp, context, target);
    } else {
      Pop(r0, fp, context);
    }
  }
  mtlr(r0);
}

// ExitFrame layout (probably wrongish... needs updating)
//
//  SP -> previousSP
//        LK reserved
//        code
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around; so first
// we reserve a slot for LK and push the previous SP, which is captured
// in the fp register (r31).
// Then we allocate the new frame.

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK(stack_space > 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code.

  LoadSmiLiteral(ip, Smi::FromInt(frame_type));
  PushCommonFrame(ip);
  // Reserve room for saved entry sp and code object.
  subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    li(r8, Operand::Zero());
    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_embedded_constant_pool) {
    StoreP(kConstantPoolRegister,
           MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  mov(r8, Operand(CodeObject()));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(fp, MemOperand(r8));
  mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(cp, MemOperand(r8));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  addi(sp, sp, Operand(-stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));

  // Set the exit frame sp value to point just before the return address
  // location.
  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::InitializeNewString(Register string, Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1, Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
  li(scratch1, Operand(String::kEmptyHashField));
  StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
  StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
}


int MacroAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one PPC
  // platform for another PPC platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}


void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool restore_context,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    const int offset =
        (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
    addi(r6, fp, Operand(-offset));
    MultiPopDoubles(kCallerSavedDoubles, r6);
  }

  // Clear top frame.
  li(r6, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  StoreP(r6, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
    LoadP(cp, MemOperand(ip));
  }
#ifdef DEBUG
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  StoreP(r6, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    add(sp, sp, argument_count);
  }
}


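// In the PPC calling convention d1 holds both the first floating-point
// argument and the floating-point return value, so both helpers below copy
// from d1.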
void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d1);
}


void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d1);
}

void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of destination area where we will put the arguments
  // after we drop current frame. We add kPointerSize to count the receiver
  // argument which is not included into formal parameters count.
  Register dst_reg = scratch0;
  ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
  add(dst_reg, fp, dst_reg);
  addi(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
    add(src_reg, sp, src_reg);
    addi(src_reg, src_reg, Operand(kPointerSize));
  } else {
    Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
  }

  if (FLAG_debug_code) {
    cmpl(src_reg, dst_reg);
    Check(lt, kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  if (callee_args_count.is_reg()) {
    addi(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
  } else {
    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
  }
  mtctr(tmp_reg);
  bind(&loop);
  LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
  StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
  bdnz(&loop);

  // Leave current frame.
  mr(sp, dst_reg);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //   r3: actual arguments count
  //   r4: function (passed through to callee)
  //   r5: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.

  // ARM has some sanity checks as per below; consider adding them for PPC:
  // DCHECK(actual.is_immediate() || actual.reg().is(r3));
  // DCHECK(expected.is_immediate() || expected.reg().is(r5));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r3, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r5, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r3, Operand(actual.immediate()));
      cmpi(expected.reg(), Operand(actual.immediate()));
      beq(&regular_invoke);
    } else {
      cmp(expected.reg(), actual.reg());
      beq(&regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
1405
1406
FloodFunctionIfStepping(Register fun,Register new_target,const ParameterCount & expected,const ParameterCount & actual)1407 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
1408 const ParameterCount& expected,
1409 const ParameterCount& actual) {
1410 Label skip_flooding;
1411 ExternalReference last_step_action =
1412 ExternalReference::debug_last_step_action_address(isolate());
1413 STATIC_ASSERT(StepFrame > StepIn);
1414 mov(r7, Operand(last_step_action));
1415 LoadByte(r7, MemOperand(r7), r0);
1416 extsb(r7, r7);
1417 cmpi(r7, Operand(StepIn));
1418 blt(&skip_flooding);
1419 {
1420 FrameScope frame(this,
1421 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1422 if (expected.is_reg()) {
1423 SmiTag(expected.reg());
1424 Push(expected.reg());
1425 }
1426 if (actual.is_reg()) {
1427 SmiTag(actual.reg());
1428 Push(actual.reg());
1429 }
1430 if (new_target.is_valid()) {
1431 Push(new_target);
1432 }
1433 Push(fun, fun);
1434 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
1435 Pop(fun);
1436 if (new_target.is_valid()) {
1437 Pop(new_target);
1438 }
1439 if (actual.is_reg()) {
1440 Pop(actual.reg());
1441 SmiUntag(actual.reg());
1442 }
1443 if (expected.is_reg()) {
1444 Pop(expected.reg());
1445 SmiUntag(expected.reg());
1446 }
1447 }
1448 bind(&skip_flooding);
1449 }
1450
1451
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1452 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1453 const ParameterCount& expected,
1454 const ParameterCount& actual,
1455 InvokeFlag flag,
1456 const CallWrapper& call_wrapper) {
1457 // You can't call a function without a valid frame.
1458 DCHECK(flag == JUMP_FUNCTION || has_frame());
1459 DCHECK(function.is(r4));
1460 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
1461
1462 if (call_wrapper.NeedsDebugStepCheck()) {
1463 FloodFunctionIfStepping(function, new_target, expected, actual);
1464 }
1465
1466 // Clear the new.target register if not given.
1467 if (!new_target.is_valid()) {
1468 LoadRoot(r6, Heap::kUndefinedValueRootIndex);
1469 }
1470
1471 Label done;
1472 bool definitely_mismatches = false;
1473 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1474 call_wrapper);
1475 if (!definitely_mismatches) {
1476 // We call indirectly through the code field in the function to
1477 // allow recompilation to take effect without changing any of the
1478 // call sites.
1479 Register code = ip;
1480 LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1481 if (flag == CALL_FUNCTION) {
1482 call_wrapper.BeforeCall(CallSize(code));
1483 CallJSEntry(code);
1484 call_wrapper.AfterCall();
1485 } else {
1486 DCHECK(flag == JUMP_FUNCTION);
1487 JumpToJSEntry(code);
1488 }
1489
1490 // Continue here if InvokePrologue does handle the invocation due to
1491 // mismatched parameter counts.
1492 bind(&done);
1493 }
1494 }
1495
1496
InvokeFunction(Register fun,Register new_target,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1497 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1498 const ParameterCount& actual,
1499 InvokeFlag flag,
1500 const CallWrapper& call_wrapper) {
1501 // You can't call a function without a valid frame.
1502 DCHECK(flag == JUMP_FUNCTION || has_frame());
1503
1504 // Contract with called JS functions requires that function is passed in r4.
1505 DCHECK(fun.is(r4));
1506
1507 Register expected_reg = r5;
1508 Register temp_reg = r7;
1509
1510 LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1511 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1512 LoadWordArith(expected_reg,
1513 FieldMemOperand(
1514 temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1515 #if !defined(V8_TARGET_ARCH_PPC64)
1516 SmiUntag(expected_reg);
1517 #endif
1518
1519 ParameterCount expected(expected_reg);
1520 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
1521 }
1522
1523
InvokeFunction(Register function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1524 void MacroAssembler::InvokeFunction(Register function,
1525 const ParameterCount& expected,
1526 const ParameterCount& actual,
1527 InvokeFlag flag,
1528 const CallWrapper& call_wrapper) {
1529 // You can't call a function without a valid frame.
1530 DCHECK(flag == JUMP_FUNCTION || has_frame());
1531
1532 // Contract with called JS functions requires that function is passed in r4.
1533 DCHECK(function.is(r4));
1534
1535 // Get the function and setup the context.
1536 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1537
1538 InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
1539 }
1540
1541
InvokeFunction(Handle<JSFunction> function,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag,const CallWrapper & call_wrapper)1542 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1543 const ParameterCount& expected,
1544 const ParameterCount& actual,
1545 InvokeFlag flag,
1546 const CallWrapper& call_wrapper) {
1547 Move(r4, function);
1548 InvokeFunction(r4, expected, actual, flag, call_wrapper);
1549 }
1550
1551
IsObjectJSStringType(Register object,Register scratch,Label * fail)1552 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1553 Label* fail) {
1554 DCHECK(kNotStringTag != 0);
1555
1556 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1557 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1558 andi(r0, scratch, Operand(kIsNotStringMask));
1559 bne(fail, cr0);
1560 }
1561
1562
IsObjectNameType(Register object,Register scratch,Label * fail)1563 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1564 Label* fail) {
1565 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1566 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1567 cmpi(scratch, Operand(LAST_NAME_TYPE));
1568 bgt(fail);
1569 }
1570
1571
DebugBreak()1572 void MacroAssembler::DebugBreak() {
1573 li(r3, Operand::Zero());
1574 mov(r4,
1575 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
1576 CEntryStub ces(isolate(), 1);
1577 DCHECK(AllowThisStubCall(&ces));
1578 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
1579 }
1580
1581
PushStackHandler()1582 void MacroAssembler::PushStackHandler() {
1583 // Adjust this code if not the case.
1584 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1585 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1586
1587 // Link the current handler as the next handler.
1588 // Preserve r3-r7.
1589 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1590 LoadP(r0, MemOperand(r8));
1591 push(r0);
1592
1593 // Set this new handler as the current one.
1594 StoreP(sp, MemOperand(r8));
1595 }
1596
1597
PopStackHandler()1598 void MacroAssembler::PopStackHandler() {
1599 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1600 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1601
1602 pop(r4);
1603 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1604 StoreP(r4, MemOperand(ip));
1605 }
1606
1607
1608 // Compute the hash code from the untagged key. This must be kept in sync with
1609 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1610 // code-stubs-hydrogen.cc
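// For reference, the assembly below corresponds to this C-style sketch
// (all operations on an unsigned 32-bit value, seed applied first):
//   hash = seed ^ key;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;
//   hash ^= hash >> 16;
//   hash &= 0x3fffffff;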
GetNumberHash(Register t0,Register scratch)1611 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1612 // First of all we assign the hash seed to scratch.
1613 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1614 SmiUntag(scratch);
1615
1616 // Xor original key with a seed.
1617 xor_(t0, t0, scratch);
1618
1619 // Compute the hash code from the untagged key. This must be kept in sync
1620 // with ComputeIntegerHash in utils.h.
1621 //
1622 // hash = ~hash + (hash << 15);
1623 notx(scratch, t0);
1624 slwi(t0, t0, Operand(15));
1625 add(t0, scratch, t0);
1626 // hash = hash ^ (hash >> 12);
1627 srwi(scratch, t0, Operand(12));
1628 xor_(t0, t0, scratch);
1629 // hash = hash + (hash << 2);
1630 slwi(scratch, t0, Operand(2));
1631 add(t0, t0, scratch);
1632 // hash = hash ^ (hash >> 4);
1633 srwi(scratch, t0, Operand(4));
1634 xor_(t0, t0, scratch);
1635 // hash = hash * 2057;
1636 mr(r0, t0);
1637 slwi(scratch, t0, Operand(3));
1638 add(t0, t0, scratch);
1639 slwi(scratch, r0, Operand(11));
1640 add(t0, t0, scratch);
1641 // hash = hash ^ (hash >> 16);
1642 srwi(scratch, t0, Operand(16));
1643 xor_(t0, t0, scratch);
1644 // hash & 0x3fffffff
1645 ExtractBitRange(t0, t0, 29, 0);
1646 }
1647
Allocate(int object_size,Register result,Register scratch1,Register scratch2,Label * gc_required,AllocationFlags flags)1648 void MacroAssembler::Allocate(int object_size, Register result,
1649 Register scratch1, Register scratch2,
1650 Label* gc_required, AllocationFlags flags) {
1651 DCHECK(object_size <= kMaxRegularHeapObjectSize);
1652 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1653 if (!FLAG_inline_new) {
1654 if (emit_debug_code()) {
1655 // Trash the registers to simulate an allocation failure.
1656 li(result, Operand(0x7091));
1657 li(scratch1, Operand(0x7191));
1658 li(scratch2, Operand(0x7291));
1659 }
1660 b(gc_required);
1661 return;
1662 }
1663
1664 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1665
1666 // Make object size into bytes.
1667 if ((flags & SIZE_IN_WORDS) != 0) {
1668 object_size *= kPointerSize;
1669 }
1670 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1671
1672 // Check relative positions of allocation top and limit addresses.
1673 ExternalReference allocation_top =
1674 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1675 ExternalReference allocation_limit =
1676 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1677
1678 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1679 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1680 DCHECK((limit - top) == kPointerSize);
1681
1682 // Set up allocation top address register.
1683 Register top_address = scratch1;
1684 // This code stores a temporary value in ip. This is OK, as the code below
1685 // does not need ip for implicit literal generation.
1686 Register alloc_limit = ip;
1687 Register result_end = scratch2;
1688 mov(top_address, Operand(allocation_top));
1689
1690 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1691 // Load allocation top into result and allocation limit into ip.
1692 LoadP(result, MemOperand(top_address));
1693 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1694 } else {
1695 if (emit_debug_code()) {
1696 // Assert that result actually contains top on entry.
1697 LoadP(alloc_limit, MemOperand(top_address));
1698 cmp(result, alloc_limit);
1699 Check(eq, kUnexpectedAllocationTop);
1700 }
1701 // Load allocation limit. Result already contains allocation top.
1702 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1703 }
1704
1705 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1706 // Align the next allocation. Storing the filler map without checking top is
1707 // safe in new-space because the limit of the heap is aligned there.
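// On 32-bit targets this means: if the current top is only pointer-aligned,
// write a one-pointer filler map into the odd slot and advance the allocation
// start by kDoubleSize / 2 so the object itself begins on an 8-byte boundary.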
1708 #if V8_TARGET_ARCH_PPC64
1709 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1710 #else
1711 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1712 andi(result_end, result, Operand(kDoubleAlignmentMask));
1713 Label aligned;
1714 beq(&aligned, cr0);
1715 if ((flags & PRETENURE) != 0) {
1716 cmpl(result, alloc_limit);
1717 bge(gc_required);
1718 }
1719 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1720 stw(result_end, MemOperand(result));
1721 addi(result, result, Operand(kDoubleSize / 2));
1722 bind(&aligned);
1723 #endif
1724 }
1725
1726 // Calculate new top and bail out if new space is exhausted. Use result
1727 // to calculate the new top.
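// Comparing the remaining space (limit - top) against the object size,
// rather than computing top + size first, avoids any overflow in the bounds
// check.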
1728 sub(r0, alloc_limit, result);
1729 if (is_int16(object_size)) {
1730 cmpi(r0, Operand(object_size));
1731 blt(gc_required);
1732 addi(result_end, result, Operand(object_size));
1733 } else {
1734 Cmpi(r0, Operand(object_size), result_end);
1735 blt(gc_required);
1736 add(result_end, result, result_end);
1737 }
1738
1739 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1740 // The top pointer is not updated for allocation folding dominators.
1741 StoreP(result_end, MemOperand(top_address));
1742 }
1743
1744 // Tag object.
1745 addi(result, result, Operand(kHeapObjectTag));
1746 }
1747
1748
Allocate(Register object_size,Register result,Register result_end,Register scratch,Label * gc_required,AllocationFlags flags)1749 void MacroAssembler::Allocate(Register object_size, Register result,
1750 Register result_end, Register scratch,
1751 Label* gc_required, AllocationFlags flags) {
1752 DCHECK((flags & ALLOCATION_FOLDED) == 0);
1753 if (!FLAG_inline_new) {
1754 if (emit_debug_code()) {
1755 // Trash the registers to simulate an allocation failure.
1756 li(result, Operand(0x7091));
1757 li(scratch, Operand(0x7191));
1758 li(result_end, Operand(0x7291));
1759 }
1760 b(gc_required);
1761 return;
1762 }
1763
1764 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1765 // is not specified. Other registers must not overlap.
1766 DCHECK(!AreAliased(object_size, result, scratch, ip));
1767 DCHECK(!AreAliased(result_end, result, scratch, ip));
1768 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1769
1770 // Check relative positions of allocation top and limit addresses.
1771 ExternalReference allocation_top =
1772 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1773 ExternalReference allocation_limit =
1774 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1775 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1776 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1777 DCHECK((limit - top) == kPointerSize);
1778
1779 // Set up allocation top address and allocation limit registers.
1780 Register top_address = scratch;
1781 // This code stores a temporary value in ip. This is OK, as the code below
1782 // does not need ip for implicit literal generation.
1783 Register alloc_limit = ip;
1784 mov(top_address, Operand(allocation_top));
1785
1786 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1787 // Load allocation top into result and allocation limit into alloc_limit.
1788 LoadP(result, MemOperand(top_address));
1789 LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
1790 } else {
1791 if (emit_debug_code()) {
1792 // Assert that result actually contains top on entry.
1793 LoadP(alloc_limit, MemOperand(top_address));
1794 cmp(result, alloc_limit);
1795 Check(eq, kUnexpectedAllocationTop);
1796 }
1797 // Load allocation limit. Result already contains allocation top.
1798 LoadP(alloc_limit, MemOperand(top_address, limit - top));
1799 }
1800
1801 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1802 // Align the next allocation. Storing the filler map without checking top is
1803 // safe in new-space because the limit of the heap is aligned there.
1804 #if V8_TARGET_ARCH_PPC64
1805 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1806 #else
1807 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1808 andi(result_end, result, Operand(kDoubleAlignmentMask));
1809 Label aligned;
1810 beq(&aligned, cr0);
1811 if ((flags & PRETENURE) != 0) {
1812 cmpl(result, alloc_limit);
1813 bge(gc_required);
1814 }
1815 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1816 stw(result_end, MemOperand(result));
1817 addi(result, result, Operand(kDoubleSize / 2));
1818 bind(&aligned);
1819 #endif
1820 }
1821
1822 // Calculate new top and bail out if new space is exhausted. Use result
1823 // to calculate the new top. Object size may be in words so a shift is
1824 // required to get the number of bytes.
1825 sub(r0, alloc_limit, result);
1826 if ((flags & SIZE_IN_WORDS) != 0) {
1827 ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
1828 cmp(r0, result_end);
1829 blt(gc_required);
1830 add(result_end, result, result_end);
1831 } else {
1832 cmp(r0, object_size);
1833 blt(gc_required);
1834 add(result_end, result, object_size);
1835 }
1836
1837 // Update allocation top. result temporarily holds the new top.
1838 if (emit_debug_code()) {
1839 andi(r0, result_end, Operand(kObjectAlignmentMask));
1840 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1841 }
1842 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1843 // The top pointer is not updated for allocation folding dominators.
1844 StoreP(result_end, MemOperand(top_address));
1845 }
1846
1847 // Tag object.
1848 addi(result, result, Operand(kHeapObjectTag));
1849 }
1850
FastAllocate(Register object_size,Register result,Register result_end,Register scratch,AllocationFlags flags)1851 void MacroAssembler::FastAllocate(Register object_size, Register result,
1852 Register result_end, Register scratch,
1853 AllocationFlags flags) {
1854 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
1855 // is not specified. Other registers must not overlap.
1856 DCHECK(!AreAliased(object_size, result, scratch, ip));
1857 DCHECK(!AreAliased(result_end, result, scratch, ip));
1858 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
1859
1860 ExternalReference allocation_top =
1861 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1862
1863 Register top_address = scratch;
1864 mov(top_address, Operand(allocation_top));
1865 LoadP(result, MemOperand(top_address));
1866
1867 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1868 // Align the next allocation. Storing the filler map without checking top is
1869 // safe in new-space because the limit of the heap is aligned there.
1870 #if V8_TARGET_ARCH_PPC64
1871 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1872 #else
1873 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1874 andi(result_end, result, Operand(kDoubleAlignmentMask));
1875 Label aligned;
1876 beq(&aligned);
1877 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1878 stw(result_end, MemOperand(result));
1879 addi(result, result, Operand(kDoubleSize / 2));
1880 bind(&aligned);
1881 #endif
1882 }
1883
1884 // Calculate new top using result. Object size may be in words so a shift is
1885 // required to get the number of bytes.
1886 if ((flags & SIZE_IN_WORDS) != 0) {
1887 ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
1888 add(result_end, result, result_end);
1889 } else {
1890 add(result_end, result, object_size);
1891 }
1892
1893 // Update allocation top. result temporarily holds the new top.
1894 if (emit_debug_code()) {
1895 andi(r0, result_end, Operand(kObjectAlignmentMask));
1896 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1897 }
1898 StoreP(result_end, MemOperand(top_address));
1899
1900 // Tag object.
1901 addi(result, result, Operand(kHeapObjectTag));
1902 }
1903
FastAllocate(int object_size,Register result,Register scratch1,Register scratch2,AllocationFlags flags)1904 void MacroAssembler::FastAllocate(int object_size, Register result,
1905 Register scratch1, Register scratch2,
1906 AllocationFlags flags) {
1907 DCHECK(object_size <= kMaxRegularHeapObjectSize);
1908 DCHECK(!AreAliased(result, scratch1, scratch2, ip));
1909
1910 // Make object size into bytes.
1911 if ((flags & SIZE_IN_WORDS) != 0) {
1912 object_size *= kPointerSize;
1913 }
1914 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
1915
1916 ExternalReference allocation_top =
1917 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1918
1919 // Set up allocation top address register.
1920 Register top_address = scratch1;
1921 Register result_end = scratch2;
1922 mov(top_address, Operand(allocation_top));
1923 LoadP(result, MemOperand(top_address));
1924
1925 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1926 // Align the next allocation. Storing the filler map without checking top is
1927 // safe in new-space because the limit of the heap is aligned there.
1928 #if V8_TARGET_ARCH_PPC64
1929 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1930 #else
1931 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1932 andi(result_end, result, Operand(kDoubleAlignmentMask));
1933 Label aligned;
1934 beq(&aligned);
1935 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
1936 stw(result_end, MemOperand(result));
1937 addi(result, result, Operand(kDoubleSize / 2));
1938 bind(&aligned);
1939 #endif
1940 }
1941
1942 // Calculate new top using result.
1943 Add(result_end, result, object_size, r0);
1944
1945 // The top pointer is not updated for allocation folding dominators.
1946 StoreP(result_end, MemOperand(top_address));
1947
1948 // Tag object.
1949 addi(result, result, Operand(kHeapObjectTag));
1950 }
1951
1952
AllocateTwoByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)1953 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1954 Register scratch1, Register scratch2,
1955 Register scratch3,
1956 Label* gc_required) {
1957 // Calculate the number of bytes needed for the characters in the string while
1958 // observing object alignment.
1959 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1960 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
1961 addi(scratch1, scratch1,
1962 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1963 mov(r0, Operand(~kObjectAlignmentMask));
1964 and_(scratch1, scratch1, r0);
1965
1966 // Allocate two-byte string in new space.
1967 Allocate(scratch1, result, scratch2, scratch3, gc_required,
1968 NO_ALLOCATION_FLAGS);
1969
1970 // Set the map, length and hash field.
1971 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1972 scratch2);
1973 }
1974
1975
AllocateOneByteString(Register result,Register length,Register scratch1,Register scratch2,Register scratch3,Label * gc_required)1976 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1977 Register scratch1, Register scratch2,
1978 Register scratch3,
1979 Label* gc_required) {
1980 // Calculate the number of bytes needed for the characters in the string while
1981 // observing object alignment.
1982 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1983 DCHECK(kCharSize == 1);
1984 addi(scratch1, length,
1985 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1986 li(r0, Operand(~kObjectAlignmentMask));
1987 and_(scratch1, scratch1, r0);
1988
1989 // Allocate one-byte string in new space.
1990 Allocate(scratch1, result, scratch2, scratch3, gc_required,
1991 NO_ALLOCATION_FLAGS);
1992
1993 // Set the map, length and hash field.
1994 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1995 scratch1, scratch2);
1996 }
1997
1998
AllocateTwoByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)1999 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
2000 Register scratch1,
2001 Register scratch2,
2002 Label* gc_required) {
2003 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2004 NO_ALLOCATION_FLAGS);
2005
2006 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
2007 scratch2);
2008 }
2009
2010
AllocateOneByteConsString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)2011 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
2012 Register scratch1,
2013 Register scratch2,
2014 Label* gc_required) {
2015 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
2016 NO_ALLOCATION_FLAGS);
2017
2018 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
2019 scratch1, scratch2);
2020 }
2021
2022
AllocateTwoByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)2023 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2024 Register length,
2025 Register scratch1,
2026 Register scratch2,
2027 Label* gc_required) {
2028 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2029 NO_ALLOCATION_FLAGS);
2030
2031 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
2032 scratch2);
2033 }
2034
2035
AllocateOneByteSlicedString(Register result,Register length,Register scratch1,Register scratch2,Label * gc_required)2036 void MacroAssembler::AllocateOneByteSlicedString(Register result,
2037 Register length,
2038 Register scratch1,
2039 Register scratch2,
2040 Label* gc_required) {
2041 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2042 NO_ALLOCATION_FLAGS);
2043
2044 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
2045 scratch1, scratch2);
2046 }
2047
2048
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)2049 void MacroAssembler::CompareObjectType(Register object, Register map,
2050 Register type_reg, InstanceType type) {
2051 const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
2052
2053 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2054 CompareInstanceType(map, temp, type);
2055 }
2056
2057
CompareInstanceType(Register map,Register type_reg,InstanceType type)2058 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
2059 InstanceType type) {
2060 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2061 STATIC_ASSERT(LAST_TYPE < 256);
2062 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2063 cmpi(type_reg, Operand(type));
2064 }
2065
2066
CompareRoot(Register obj,Heap::RootListIndex index)2067 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
2068 DCHECK(!obj.is(r0));
2069 LoadRoot(r0, index);
2070 cmp(obj, r0);
2071 }
2072
CheckFastObjectElements(Register map,Register scratch,Label * fail)2073 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
2074 Label* fail) {
2075 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2076 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2077 STATIC_ASSERT(FAST_ELEMENTS == 2);
2078 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2079 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2080 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2081 ble(fail);
2082 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2083 bgt(fail);
2084 }
2085
2086
CheckFastSmiElements(Register map,Register scratch,Label * fail)2087 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
2088 Label* fail) {
2089 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2090 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2091 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2092 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2093 bgt(fail);
2094 }
2095
2096
StoreNumberToDoubleElements(Register value_reg,Register key_reg,Register elements_reg,Register scratch1,DoubleRegister double_scratch,Label * fail,int elements_offset)2097 void MacroAssembler::StoreNumberToDoubleElements(
2098 Register value_reg, Register key_reg, Register elements_reg,
2099 Register scratch1, DoubleRegister double_scratch, Label* fail,
2100 int elements_offset) {
2101 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
2102 Label smi_value, store;
2103
2104 // Handle smi values specially.
2105 JumpIfSmi(value_reg, &smi_value);
2106
2107 // Ensure that the object is a heap number
2108 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
2109 DONT_DO_SMI_CHECK);
2110
2111 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2112 // Double value, turn potential sNaN into qNaN.
2113 CanonicalizeNaN(double_scratch);
2114 b(&store);
2115
2116 bind(&smi_value);
2117 SmiToDouble(double_scratch, value_reg);
2118
2119 bind(&store);
2120 SmiToDoubleArrayOffset(scratch1, key_reg);
2121 add(scratch1, elements_reg, scratch1);
2122 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
2123 elements_offset));
2124 }
2125
2126
AddAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)2127 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2128 Register right,
2129 Register overflow_dst,
2130 Register scratch) {
2131 DCHECK(!dst.is(overflow_dst));
2132 DCHECK(!dst.is(scratch));
2133 DCHECK(!overflow_dst.is(scratch));
2134 DCHECK(!overflow_dst.is(left));
2135 DCHECK(!overflow_dst.is(right));
2136
2137 bool left_is_right = left.is(right);
2138 RCBit xorRC = left_is_right ? SetRC : LeaveRC;
2139
2140 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
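// The sequence below computes (dst ^ left) & (dst ^ right) (just dst ^ left
// when both inputs are the same register); the sign bit of the result is set
// exactly when the addition overflowed, and the RC bit makes it available in
// cr0 for the caller to branch on.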
2141 if (dst.is(left)) {
2142 mr(scratch, left); // Preserve left.
2143 add(dst, left, right); // Left is overwritten.
2144 xor_(overflow_dst, dst, scratch, xorRC); // Original left.
2145 if (!left_is_right) xor_(scratch, dst, right);
2146 } else if (dst.is(right)) {
2147 mr(scratch, right); // Preserve right.
2148 add(dst, left, right); // Right is overwritten.
2149 xor_(overflow_dst, dst, left, xorRC);
2150 if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
2151 } else {
2152 add(dst, left, right);
2153 xor_(overflow_dst, dst, left, xorRC);
2154 if (!left_is_right) xor_(scratch, dst, right);
2155 }
2156 if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
2157 }
2158
2159
AddAndCheckForOverflow(Register dst,Register left,intptr_t right,Register overflow_dst,Register scratch)2160 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
2161 intptr_t right,
2162 Register overflow_dst,
2163 Register scratch) {
2164 Register original_left = left;
2165 DCHECK(!dst.is(overflow_dst));
2166 DCHECK(!dst.is(scratch));
2167 DCHECK(!overflow_dst.is(scratch));
2168 DCHECK(!overflow_dst.is(left));
2169
2170 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
2171 if (dst.is(left)) {
2172 // Preserve left.
2173 original_left = overflow_dst;
2174 mr(original_left, left);
2175 }
2176 Add(dst, left, right, scratch);
2177 xor_(overflow_dst, dst, original_left);
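// With an immediate right-hand side the second xor can be folded into the
// conditional below: if right >= 0, overflow requires dst to be negative, so
// 'and' with dst isolates that case; if right < 0, overflow requires dst to
// be non-negative, which 'andc' (and with complement) captures.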
2178 if (right >= 0) {
2179 and_(overflow_dst, overflow_dst, dst, SetRC);
2180 } else {
2181 andc(overflow_dst, overflow_dst, dst, SetRC);
2182 }
2183 }
2184
2185
SubAndCheckForOverflow(Register dst,Register left,Register right,Register overflow_dst,Register scratch)2186 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
2187 Register right,
2188 Register overflow_dst,
2189 Register scratch) {
2190 DCHECK(!dst.is(overflow_dst));
2191 DCHECK(!dst.is(scratch));
2192 DCHECK(!overflow_dst.is(scratch));
2193 DCHECK(!overflow_dst.is(left));
2194 DCHECK(!overflow_dst.is(right));
2195
2196 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
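// The sequence below computes (dst ^ left) & (left ^ right); the sign bit of
// the result is set exactly when the subtraction overflowed, and SetRC makes
// it available in cr0 for the caller to branch on.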
2197 if (dst.is(left)) {
2198 mr(scratch, left); // Preserve left.
2199 sub(dst, left, right); // Left is overwritten.
2200 xor_(overflow_dst, dst, scratch);
2201 xor_(scratch, scratch, right);
2202 and_(overflow_dst, overflow_dst, scratch, SetRC);
2203 } else if (dst.is(right)) {
2204 mr(scratch, right); // Preserve right.
2205 sub(dst, left, right); // Right is overwritten.
2206 xor_(overflow_dst, dst, left);
2207 xor_(scratch, left, scratch);
2208 and_(overflow_dst, overflow_dst, scratch, SetRC);
2209 } else {
2210 sub(dst, left, right);
2211 xor_(overflow_dst, dst, left);
2212 xor_(scratch, left, right);
2213 and_(overflow_dst, scratch, overflow_dst, SetRC);
2214 }
2215 }
2216
2217
CompareMap(Register obj,Register scratch,Handle<Map> map,Label * early_success)2218 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2219 Label* early_success) {
2220 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2221 CompareMap(scratch, map, early_success);
2222 }
2223
2224
CompareMap(Register obj_map,Handle<Map> map,Label * early_success)2225 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2226 Label* early_success) {
2227 mov(r0, Operand(map));
2228 cmp(obj_map, r0);
2229 }
2230
2231
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)2232 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2233 Label* fail, SmiCheckType smi_check_type) {
2234 if (smi_check_type == DO_SMI_CHECK) {
2235 JumpIfSmi(obj, fail);
2236 }
2237
2238 Label success;
2239 CompareMap(obj, scratch, map, &success);
2240 bne(fail);
2241 bind(&success);
2242 }
2243
2244
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)2245 void MacroAssembler::CheckMap(Register obj, Register scratch,
2246 Heap::RootListIndex index, Label* fail,
2247 SmiCheckType smi_check_type) {
2248 if (smi_check_type == DO_SMI_CHECK) {
2249 JumpIfSmi(obj, fail);
2250 }
2251 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2252 LoadRoot(r0, index);
2253 cmp(scratch, r0);
2254 bne(fail);
2255 }
2256
2257
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)2258 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2259 Register scratch2, Handle<WeakCell> cell,
2260 Handle<Code> success,
2261 SmiCheckType smi_check_type) {
2262 Label fail;
2263 if (smi_check_type == DO_SMI_CHECK) {
2264 JumpIfSmi(obj, &fail);
2265 }
2266 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2267 CmpWeakValue(scratch1, cell, scratch2);
2268 Jump(success, RelocInfo::CODE_TARGET, eq);
2269 bind(&fail);
2270 }
2271
2272
CmpWeakValue(Register value,Handle<WeakCell> cell,Register scratch,CRegister cr)2273 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2274 Register scratch, CRegister cr) {
2275 mov(scratch, Operand(cell));
2276 LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2277 cmp(value, scratch, cr);
2278 }
2279
2280
GetWeakValue(Register value,Handle<WeakCell> cell)2281 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2282 mov(value, Operand(cell));
2283 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2284 }
2285
2286
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)2287 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2288 Label* miss) {
2289 GetWeakValue(value, cell);
2290 JumpIfSmi(value, miss);
2291 }
2292
2293
GetMapConstructor(Register result,Register map,Register temp,Register temp2)2294 void MacroAssembler::GetMapConstructor(Register result, Register map,
2295 Register temp, Register temp2) {
2296 Label done, loop;
2297 LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
2298 bind(&loop);
2299 JumpIfSmi(result, &done);
2300 CompareObjectType(result, temp, temp2, MAP_TYPE);
2301 bne(&done);
2302 LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
2303 b(&loop);
2304 bind(&done);
2305 }
2306
2307
TryGetFunctionPrototype(Register function,Register result,Register scratch,Label * miss)2308 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2309 Register scratch, Label* miss) {
2310 // Get the prototype or initial map from the function.
2311 LoadP(result,
2312 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2313
2314 // If the prototype or initial map is the hole, don't return it and
2315 // simply miss the cache instead. This will allow us to allocate a
2316 // prototype object on-demand in the runtime system.
2317 LoadRoot(r0, Heap::kTheHoleValueRootIndex);
2318 cmp(result, r0);
2319 beq(miss);
2320
2321 // If the function does not have an initial map, we're done.
2322 Label done;
2323 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2324 bne(&done);
2325
2326 // Get the prototype from the initial map.
2327 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2328
2329 // All done.
2330 bind(&done);
2331 }
2332
2333
CallStub(CodeStub * stub,TypeFeedbackId ast_id,Condition cond)2334 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2335 Condition cond) {
2336 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2337 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2338 }
2339
2340
TailCallStub(CodeStub * stub,Condition cond)2341 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2342 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2343 }
2344
2345
AllowThisStubCall(CodeStub * stub)2346 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2347 return has_frame_ || !stub->SometimesSetsUpAFrame();
2348 }
2349
SmiToDouble(DoubleRegister value,Register smi)2350 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2351 SmiUntag(ip, smi);
2352 ConvertIntToDouble(ip, value);
2353 }
2354
2355
TestDoubleIsInt32(DoubleRegister double_input,Register scratch1,Register scratch2,DoubleRegister double_scratch)2356 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2357 Register scratch1, Register scratch2,
2358 DoubleRegister double_scratch) {
2359 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2360 }
2361
TestDoubleIsMinusZero(DoubleRegister input,Register scratch1,Register scratch2)2362 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
2363 Register scratch1,
2364 Register scratch2) {
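// On 64-bit targets, rotating the raw bit pattern left by one turns -0.0
// (sign bit set, all other bits clear) into exactly 1, so a single compare
// suffices. The 32-bit path inspects the two halves of the bit pattern
// separately.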
2365 #if V8_TARGET_ARCH_PPC64
2366 MovDoubleToInt64(scratch1, input);
2367 rotldi(scratch1, scratch1, 1);
2368 cmpi(scratch1, Operand(1));
2369 #else
2370 MovDoubleToInt64(scratch1, scratch2, input);
2371 Label done;
2372 cmpi(scratch2, Operand::Zero());
2373 bne(&done);
2374 lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
2375 cmp(scratch1, scratch2);
2376 bind(&done);
2377 #endif
2378 }
2379
TestDoubleSign(DoubleRegister input,Register scratch)2380 void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
2381 #if V8_TARGET_ARCH_PPC64
2382 MovDoubleToInt64(scratch, input);
2383 #else
2384 MovDoubleHighToInt(scratch, input);
2385 #endif
2386 cmpi(scratch, Operand::Zero());
2387 }
2388
TestHeapNumberSign(Register input,Register scratch)2389 void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
2390 #if V8_TARGET_ARCH_PPC64
2391 LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2392 #else
2393 lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
2394 #endif
2395 cmpi(scratch, Operand::Zero());
2396 }
2397
TryDoubleToInt32Exact(Register result,DoubleRegister double_input,Register scratch,DoubleRegister double_scratch)2398 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2399 DoubleRegister double_input,
2400 Register scratch,
2401 DoubleRegister double_scratch) {
2402 Label done;
2403 DCHECK(!double_input.is(double_scratch));
2404
2405 ConvertDoubleToInt64(double_input,
2406 #if !V8_TARGET_ARCH_PPC64
2407 scratch,
2408 #endif
2409 result, double_scratch);
2410
2411 #if V8_TARGET_ARCH_PPC64
2412 TestIfInt32(result, r0);
2413 #else
2414 TestIfInt32(scratch, result, r0);
2415 #endif
2416 bne(&done);
2417
2418 // Convert back and compare.
2419 fcfid(double_scratch, double_scratch);
2420 fcmpu(double_scratch, double_input);
2421 bind(&done);
2422 }
2423
2424
TryInt32Floor(Register result,DoubleRegister double_input,Register input_high,Register scratch,DoubleRegister double_scratch,Label * done,Label * exact)2425 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2426 Register input_high, Register scratch,
2427 DoubleRegister double_scratch, Label* done,
2428 Label* exact) {
2429 DCHECK(!result.is(input_high));
2430 DCHECK(!double_input.is(double_scratch));
2431 Label exception;
2432
2433 MovDoubleHighToInt(input_high, double_input);
2434
2435 // Test for NaN/Inf
2436 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2437 cmpli(result, Operand(0x7ff));
2438 beq(&exception);
2439
2440 // Convert (rounding to -Inf)
2441 ConvertDoubleToInt64(double_input,
2442 #if !V8_TARGET_ARCH_PPC64
2443 scratch,
2444 #endif
2445 result, double_scratch, kRoundToMinusInf);
2446
2447 // Test for overflow
2448 #if V8_TARGET_ARCH_PPC64
2449 TestIfInt32(result, r0);
2450 #else
2451 TestIfInt32(scratch, result, r0);
2452 #endif
2453 bne(&exception);
2454
2455 // Test for exactness
2456 fcfid(double_scratch, double_scratch);
2457 fcmpu(double_scratch, double_input);
2458 beq(exact);
2459 b(done);
2460
2461 bind(&exception);
2462 }
2463
2464
TryInlineTruncateDoubleToI(Register result,DoubleRegister double_input,Label * done)2465 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2466 DoubleRegister double_input,
2467 Label* done) {
2468 DoubleRegister double_scratch = kScratchDoubleReg;
2469 #if !V8_TARGET_ARCH_PPC64
2470 Register scratch = ip;
2471 #endif
2472
2473 ConvertDoubleToInt64(double_input,
2474 #if !V8_TARGET_ARCH_PPC64
2475 scratch,
2476 #endif
2477 result, double_scratch);
2478
2479 // Test for overflow
2480 #if V8_TARGET_ARCH_PPC64
2481 TestIfInt32(result, r0);
2482 #else
2483 TestIfInt32(scratch, result, r0);
2484 #endif
2485 beq(done);
2486 }
2487
2488
TruncateDoubleToI(Register result,DoubleRegister double_input)2489 void MacroAssembler::TruncateDoubleToI(Register result,
2490 DoubleRegister double_input) {
2491 Label done;
2492
2493 TryInlineTruncateDoubleToI(result, double_input, &done);
2494
2495 // If we fell through, the inline version didn't succeed, so call the stub instead.
2496 mflr(r0);
2497 push(r0);
2498 // Put input on stack.
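// stfdu is a store with update: sp is decremented by kDoubleSize and the
// value is written at the new top of the stack, which is where the stub
// below reads its input (source register sp, offset 0).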
2499 stfdu(double_input, MemOperand(sp, -kDoubleSize));
2500
2501 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2502 CallStub(&stub);
2503
2504 addi(sp, sp, Operand(kDoubleSize));
2505 pop(r0);
2506 mtlr(r0);
2507
2508 bind(&done);
2509 }
2510
2511
TruncateHeapNumberToI(Register result,Register object)2512 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2513 Label done;
2514 DoubleRegister double_scratch = kScratchDoubleReg;
2515 DCHECK(!result.is(object));
2516
2517 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2518 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2519
2520 // If we fell through, the inline version didn't succeed, so call the stub instead.
2521 mflr(r0);
2522 push(r0);
2523 DoubleToIStub stub(isolate(), object, result,
2524 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2525 CallStub(&stub);
2526 pop(r0);
2527 mtlr(r0);
2528
2529 bind(&done);
2530 }
2531
2532
TruncateNumberToI(Register object,Register result,Register heap_number_map,Register scratch1,Label * not_number)2533 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2534 Register heap_number_map,
2535 Register scratch1, Label* not_number) {
2536 Label done;
2537 DCHECK(!result.is(object));
2538
2539 UntagAndJumpIfSmi(result, object, &done);
2540 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2541 TruncateHeapNumberToI(result, object);
2542
2543 bind(&done);
2544 }
2545
2546
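// Extracts the |num_least_bits| low-order bits of the untagged smi value in
// |src| into |dst|: the rotate shifts the payload down past the smi tag and
// the mask clears all higher bits.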
GetLeastBitsFromSmi(Register dst,Register src,int num_least_bits)2547 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2548 int num_least_bits) {
2549 #if V8_TARGET_ARCH_PPC64
2550 rldicl(dst, src, kBitsPerPointer - kSmiShift,
2551 kBitsPerPointer - num_least_bits);
2552 #else
2553 rlwinm(dst, src, kBitsPerPointer - kSmiShift,
2554 kBitsPerPointer - num_least_bits, 31);
2555 #endif
2556 }
2557
2558
GetLeastBitsFromInt32(Register dst,Register src,int num_least_bits)2559 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2560 int num_least_bits) {
2561 rlwinm(dst, src, 0, 32 - num_least_bits, 31);
2562 }
2563
2564
CallRuntime(const Runtime::Function * f,int num_arguments,SaveFPRegsMode save_doubles)2565 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2566 SaveFPRegsMode save_doubles) {
2567 // All parameters are on the stack. r3 has the return value after call.
2568
2569 // If the expected number of arguments of the runtime function is
2570 // constant, we check that the actual number of arguments match the
2571 // expectation.
2572 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2573
2574 // TODO(1236192): Most runtime routines don't need the number of
2575 // arguments passed in because it is constant. At some point we
2576 // should remove this need and make the runtime routine entry code
2577 // smarter.
2578 mov(r3, Operand(num_arguments));
2579 mov(r4, Operand(ExternalReference(f, isolate())));
2580 CEntryStub stub(isolate(),
2581 #if V8_TARGET_ARCH_PPC64
2582 f->result_size,
2583 #else
2584 1,
2585 #endif
2586 save_doubles);
2587 CallStub(&stub);
2588 }
2589
2590
CallExternalReference(const ExternalReference & ext,int num_arguments)2591 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2592 int num_arguments) {
2593 mov(r3, Operand(num_arguments));
2594 mov(r4, Operand(ext));
2595
2596 CEntryStub stub(isolate(), 1);
2597 CallStub(&stub);
2598 }
2599
2600
TailCallRuntime(Runtime::FunctionId fid)2601 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2602 const Runtime::Function* function = Runtime::FunctionForId(fid);
2603 DCHECK_EQ(1, function->result_size);
2604 if (function->nargs >= 0) {
2605 mov(r3, Operand(function->nargs));
2606 }
2607 JumpToExternalReference(ExternalReference(fid, isolate()));
2608 }
2609
2610
JumpToExternalReference(const ExternalReference & builtin,bool builtin_exit_frame)2611 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2612 bool builtin_exit_frame) {
2613 mov(r4, Operand(builtin));
2614 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
2615 builtin_exit_frame);
2616 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2617 }
2618
2619
SetCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2620 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2621 Register scratch1, Register scratch2) {
2622 if (FLAG_native_code_counters && counter->Enabled()) {
2623 mov(scratch1, Operand(value));
2624 mov(scratch2, Operand(ExternalReference(counter)));
2625 stw(scratch1, MemOperand(scratch2));
2626 }
2627 }
2628
2629
IncrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2630 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2631 Register scratch1, Register scratch2) {
2632 DCHECK(value > 0);
2633 if (FLAG_native_code_counters && counter->Enabled()) {
2634 mov(scratch2, Operand(ExternalReference(counter)));
2635 lwz(scratch1, MemOperand(scratch2));
2636 addi(scratch1, scratch1, Operand(value));
2637 stw(scratch1, MemOperand(scratch2));
2638 }
2639 }
2640
2641
DecrementCounter(StatsCounter * counter,int value,Register scratch1,Register scratch2)2642 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2643 Register scratch1, Register scratch2) {
2644 DCHECK(value > 0);
2645 if (FLAG_native_code_counters && counter->Enabled()) {
2646 mov(scratch2, Operand(ExternalReference(counter)));
2647 lwz(scratch1, MemOperand(scratch2));
2648 subi(scratch1, scratch1, Operand(value));
2649 stw(scratch1, MemOperand(scratch2));
2650 }
2651 }
2652
2653
Assert(Condition cond,BailoutReason reason,CRegister cr)2654 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2655 CRegister cr) {
2656 if (emit_debug_code()) Check(cond, reason, cr);
2657 }
2658
2659
AssertFastElements(Register elements)2660 void MacroAssembler::AssertFastElements(Register elements) {
2661 if (emit_debug_code()) {
2662 DCHECK(!elements.is(r0));
2663 Label ok;
2664 push(elements);
2665 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2666 LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
2667 cmp(elements, r0);
2668 beq(&ok);
2669 LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
2670 cmp(elements, r0);
2671 beq(&ok);
2672 LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
2673 cmp(elements, r0);
2674 beq(&ok);
2675 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2676 bind(&ok);
2677 pop(elements);
2678 }
2679 }
2680
2681
Check(Condition cond,BailoutReason reason,CRegister cr)2682 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2683 Label L;
2684 b(cond, &L, cr);
2685 Abort(reason);
2686 // will not return here
2687 bind(&L);
2688 }
2689
2690
Abort(BailoutReason reason)2691 void MacroAssembler::Abort(BailoutReason reason) {
2692 Label abort_start;
2693 bind(&abort_start);
2694 #ifdef DEBUG
2695 const char* msg = GetBailoutReason(reason);
2696 if (msg != NULL) {
2697 RecordComment("Abort message: ");
2698 RecordComment(msg);
2699 }
2700
2701 if (FLAG_trap_on_abort) {
2702 stop(msg);
2703 return;
2704 }
2705 #endif
2706
2707 // Check if Abort() has already been initialized.
2708 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
2709
2710 LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
2711
2712 // Disable stub call restrictions to always allow calls to abort.
2713 if (!has_frame_) {
2714 // We don't actually want to generate a pile of code for this, so just
2715 // claim there is a stack frame, without generating one.
2716 FrameScope scope(this, StackFrame::NONE);
2717 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2718 } else {
2719 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2720 }
2721 // will not return here
2722 }
2723
2724
LoadContext(Register dst,int context_chain_length)2725 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2726 if (context_chain_length > 0) {
2727 // Move up the chain of contexts to the context containing the slot.
2728 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2729 for (int i = 1; i < context_chain_length; i++) {
2730 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2731 }
2732 } else {
2733 // Slot is in the current function context. Move it into the
2734 // destination register in case we store into it (the write barrier
2735 // cannot be allowed to destroy the context in cp).
2736 mr(dst, cp);
2737 }
2738 }
2739
2740
LoadTransitionedArrayMapConditional(ElementsKind expected_kind,ElementsKind transitioned_kind,Register map_in_out,Register scratch,Label * no_map_match)2741 void MacroAssembler::LoadTransitionedArrayMapConditional(
2742 ElementsKind expected_kind, ElementsKind transitioned_kind,
2743 Register map_in_out, Register scratch, Label* no_map_match) {
2744 DCHECK(IsFastElementsKind(expected_kind));
2745 DCHECK(IsFastElementsKind(transitioned_kind));
2746
2747 // Check that the function's map is the same as the expected cached map.
2748 LoadP(scratch, NativeContextMemOperand());
2749 LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
2750 cmp(map_in_out, ip);
2751 bne(no_map_match);
2752
2753 // Use the transitioned cached map.
2754 LoadP(map_in_out,
2755 ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
2756 }
2757
2758
LoadNativeContextSlot(int index,Register dst)2759 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2760 LoadP(dst, NativeContextMemOperand());
2761 LoadP(dst, ContextMemOperand(dst, index));
2762 }
2763
2764
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)2765 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2766 Register map,
2767 Register scratch) {
2768 // Load the initial map. The global functions all have initial maps.
2769 LoadP(map,
2770 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2771 if (emit_debug_code()) {
2772 Label ok, fail;
2773 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2774 b(&ok);
2775 bind(&fail);
2776 Abort(kGlobalFunctionsMustHaveInitialMap);
2777 bind(&ok);
2778 }
2779 }
2780
2781
JumpIfNotPowerOfTwoOrZero(Register reg,Register scratch,Label * not_power_of_two_or_zero)2782 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2783 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
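// A positive power of two has exactly one bit set, so reg & (reg - 1) == 0.
// The compare below rejects zero and negative values first; the masked 'and'
// then checks the single-bit property.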
2784 subi(scratch, reg, Operand(1));
2785 cmpi(scratch, Operand::Zero());
2786 blt(not_power_of_two_or_zero);
2787 and_(r0, scratch, reg, SetRC);
2788 bne(not_power_of_two_or_zero, cr0);
2789 }
2790
2791
JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,Register scratch,Label * zero_and_neg,Label * not_power_of_two)2792 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2793 Register scratch,
2794 Label* zero_and_neg,
2795 Label* not_power_of_two) {
2796 subi(scratch, reg, Operand(1));
2797 cmpi(scratch, Operand::Zero());
2798 blt(zero_and_neg);
2799 and_(r0, scratch, reg, SetRC);
2800 bne(not_power_of_two, cr0);
2801 }
2802
2803 #if !V8_TARGET_ARCH_PPC64
SmiTagCheckOverflow(Register reg,Register overflow)2804 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2805 DCHECK(!reg.is(overflow));
2806 mr(overflow, reg); // Save original value.
2807 SmiTag(reg);
2808 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
2809 }
2810
2811
SmiTagCheckOverflow(Register dst,Register src,Register overflow)2812 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2813 Register overflow) {
2814 if (dst.is(src)) {
2815 // Fall back to slower case.
2816 SmiTagCheckOverflow(dst, overflow);
2817 } else {
2818 DCHECK(!dst.is(src));
2819 DCHECK(!dst.is(overflow));
2820 DCHECK(!src.is(overflow));
2821 SmiTag(dst, src);
2822 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
2823 }
2824 }
2825 #endif
2826
JumpIfNotBothSmi(Register reg1,Register reg2,Label * on_not_both_smi)2827 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2828 Label* on_not_both_smi) {
2829 STATIC_ASSERT(kSmiTag == 0);
2830 orx(r0, reg1, reg2, LeaveRC);
2831 JumpIfNotSmi(r0, on_not_both_smi);
2832 }
2833
2834
UntagAndJumpIfSmi(Register dst,Register src,Label * smi_case)2835 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2836 Label* smi_case) {
2837 STATIC_ASSERT(kSmiTag == 0);
2838 TestBitRange(src, kSmiTagSize - 1, 0, r0);
2839 SmiUntag(dst, src);
2840 beq(smi_case, cr0);
2841 }
2842
2843
UntagAndJumpIfNotSmi(Register dst,Register src,Label * non_smi_case)2844 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2845 Label* non_smi_case) {
2846 STATIC_ASSERT(kSmiTag == 0);
2847 TestBitRange(src, kSmiTagSize - 1, 0, r0);
2848 SmiUntag(dst, src);
2849 bne(non_smi_case, cr0);
2850 }
2851
2852
JumpIfEitherSmi(Register reg1,Register reg2,Label * on_either_smi)2853 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2854 Label* on_either_smi) {
2855 STATIC_ASSERT(kSmiTag == 0);
2856 JumpIfSmi(reg1, on_either_smi);
2857 JumpIfSmi(reg2, on_either_smi);
2858 }
2859
AssertNotNumber(Register object)2860 void MacroAssembler::AssertNotNumber(Register object) {
2861 if (emit_debug_code()) {
2862 STATIC_ASSERT(kSmiTag == 0);
2863 TestIfSmi(object, r0);
2864 Check(ne, kOperandIsANumber, cr0);
2865 push(object);
2866 CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
2867 pop(object);
2868 Check(ne, kOperandIsANumber);
2869 }
2870 }
2871
AssertNotSmi(Register object)2872 void MacroAssembler::AssertNotSmi(Register object) {
2873 if (emit_debug_code()) {
2874 STATIC_ASSERT(kSmiTag == 0);
2875 TestIfSmi(object, r0);
2876 Check(ne, kOperandIsASmi, cr0);
2877 }
2878 }
2879
2880
AssertSmi(Register object)2881 void MacroAssembler::AssertSmi(Register object) {
2882 if (emit_debug_code()) {
2883 STATIC_ASSERT(kSmiTag == 0);
2884 TestIfSmi(object, r0);
2885 Check(eq, kOperandIsNotSmi, cr0);
2886 }
2887 }
2888
2889
AssertString(Register object)2890 void MacroAssembler::AssertString(Register object) {
2891 if (emit_debug_code()) {
2892 STATIC_ASSERT(kSmiTag == 0);
2893 TestIfSmi(object, r0);
2894 Check(ne, kOperandIsASmiAndNotAString, cr0);
2895 push(object);
2896 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2897 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2898 pop(object);
2899 Check(lt, kOperandIsNotAString);
2900 }
2901 }
2902
2903
AssertName(Register object)2904 void MacroAssembler::AssertName(Register object) {
2905 if (emit_debug_code()) {
2906 STATIC_ASSERT(kSmiTag == 0);
2907 TestIfSmi(object, r0);
2908 Check(ne, kOperandIsASmiAndNotAName, cr0);
2909 push(object);
2910 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2911 CompareInstanceType(object, object, LAST_NAME_TYPE);
2912 pop(object);
2913 Check(le, kOperandIsNotAName);
2914 }
2915 }
2916
2917
AssertFunction(Register object)2918 void MacroAssembler::AssertFunction(Register object) {
2919 if (emit_debug_code()) {
2920 STATIC_ASSERT(kSmiTag == 0);
2921 TestIfSmi(object, r0);
2922 Check(ne, kOperandIsASmiAndNotAFunction, cr0);
2923 push(object);
2924 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2925 pop(object);
2926 Check(eq, kOperandIsNotAFunction);
2927 }
2928 }
2929
2930
AssertBoundFunction(Register object)2931 void MacroAssembler::AssertBoundFunction(Register object) {
2932 if (emit_debug_code()) {
2933 STATIC_ASSERT(kSmiTag == 0);
2934 TestIfSmi(object, r0);
2935 Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
2936 push(object);
2937 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2938 pop(object);
2939 Check(eq, kOperandIsNotABoundFunction);
2940 }
2941 }
2942
AssertGeneratorObject(Register object)2943 void MacroAssembler::AssertGeneratorObject(Register object) {
2944 if (emit_debug_code()) {
2945 STATIC_ASSERT(kSmiTag == 0);
2946 TestIfSmi(object, r0);
2947 Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
2948 push(object);
2949 CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
2950 pop(object);
2951 Check(eq, kOperandIsNotAGeneratorObject);
2952 }
2953 }
2954
AssertReceiver(Register object)2955 void MacroAssembler::AssertReceiver(Register object) {
2956 if (emit_debug_code()) {
2957 STATIC_ASSERT(kSmiTag == 0);
2958 TestIfSmi(object, r0);
2959 Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
2960 push(object);
2961 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2962 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
2963 pop(object);
2964 Check(ge, kOperandIsNotAReceiver);
2965 }
2966 }
2967
AssertUndefinedOrAllocationSite(Register object,Register scratch)2968 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2969 Register scratch) {
2970 if (emit_debug_code()) {
2971 Label done_checking;
2972 AssertNotSmi(object);
2973 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2974 beq(&done_checking);
2975 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2976 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2977 Assert(eq, kExpectedUndefinedOrCell);
2978 bind(&done_checking);
2979 }
2980 }
2981
2982
AssertIsRoot(Register reg,Heap::RootListIndex index)2983 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2984 if (emit_debug_code()) {
2985 CompareRoot(reg, index);
2986 Check(eq, kHeapNumberMapRegisterClobbered);
2987 }
2988 }
2989
2990
JumpIfNotHeapNumber(Register object,Register heap_number_map,Register scratch,Label * on_not_heap_number)2991 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2992 Register heap_number_map,
2993 Register scratch,
2994 Label* on_not_heap_number) {
2995 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2996 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2997 cmp(scratch, heap_number_map);
2998 bne(on_not_heap_number);
2999 }
3000
3001
JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)3002 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3003 Register first, Register second, Register scratch1, Register scratch2,
3004 Label* failure) {
3005 // Test that both first and second are sequential one-byte strings.
3006 // Assume that they are non-smis.
3007 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3008 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3009 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3010 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3011
3012 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3013 scratch2, failure);
3014 }
3015
JumpIfNotBothSequentialOneByteStrings(Register first,Register second,Register scratch1,Register scratch2,Label * failure)3016 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3017 Register second,
3018 Register scratch1,
3019 Register scratch2,
3020 Label* failure) {
3021 // Check that neither is a smi.
3022 and_(scratch1, first, second);
3023 JumpIfSmi(scratch1, failure);
3024 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3025 scratch2, failure);
3026 }
3027
3028
JumpIfNotUniqueNameInstanceType(Register reg,Label * not_unique_name)3029 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3030 Label* not_unique_name) {
3031 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3032 Label succeed;
3033 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3034 beq(&succeed, cr0);
3035 cmpi(reg, Operand(SYMBOL_TYPE));
3036 bne(not_unique_name);
3037
3038 bind(&succeed);
3039 }
3040
3041
3042 // Allocates a heap number or jumps to the gc_required label if the young
3043 // space is full and a scavenge is needed.
AllocateHeapNumber(Register result,Register scratch1,Register scratch2,Register heap_number_map,Label * gc_required,MutableMode mode)3044 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
3045 Register scratch2,
3046 Register heap_number_map,
3047 Label* gc_required,
3048 MutableMode mode) {
3049 // Allocate an object in the heap for the heap number and tag it as a heap
3050 // object.
3051 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3052 NO_ALLOCATION_FLAGS);
3053
3054 Heap::RootListIndex map_index = mode == MUTABLE
3055 ? Heap::kMutableHeapNumberMapRootIndex
3056 : Heap::kHeapNumberMapRootIndex;
3057 AssertIsRoot(heap_number_map, map_index);
3058
3059 // Store heap number map in the allocated object.
3060 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
3061 r0);
3062 }
3063
3064
AllocateHeapNumberWithValue(Register result,DoubleRegister value,Register scratch1,Register scratch2,Register heap_number_map,Label * gc_required)3065 void MacroAssembler::AllocateHeapNumberWithValue(
3066 Register result, DoubleRegister value, Register scratch1, Register scratch2,
3067 Register heap_number_map, Label* gc_required) {
3068 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3069 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3070 }
3071
3072
AllocateJSValue(Register result,Register constructor,Register value,Register scratch1,Register scratch2,Label * gc_required)3073 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3074 Register value, Register scratch1,
3075 Register scratch2, Label* gc_required) {
3076 DCHECK(!result.is(constructor));
3077 DCHECK(!result.is(scratch1));
3078 DCHECK(!result.is(scratch2));
3079 DCHECK(!result.is(value));
3080
3081 // Allocate JSValue in new space.
3082 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3083 NO_ALLOCATION_FLAGS);
3084
3085 // Initialize the JSValue.
3086 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3087 StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
3088 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3089 StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
3090 StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
3091 StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
3092 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3093 }
3094
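// Stores 'filler' into 'count' consecutive pointer-sized fields starting at
// current_address, using CTR as the loop counter; 'count' must be non-zero.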
3095 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
3096 Register count,
3097 Register filler) {
3098 Label loop;
3099 mtctr(count);
3100 bind(&loop);
3101 StoreP(filler, MemOperand(current_address));
3102 addi(current_address, current_address, Operand(kPointerSize));
3103 bdnz(&loop);
3104 }
3105
3106 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
3107 Register end_address,
3108 Register filler) {
3109 Label done;
3110 sub(r0, end_address, current_address, LeaveOE, SetRC);
3111 beq(&done, cr0);
3112 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
3113 InitializeNFieldsWithFiller(current_address, r0, filler);
3114 bind(&done);
3115 }
3116
3117
3118 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3119 Register first, Register second, Register scratch1, Register scratch2,
3120 Label* failure) {
3121 const int kFlatOneByteStringMask =
3122 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3123 const int kFlatOneByteStringTag =
3124 kStringTag | kOneByteStringTag | kSeqStringTag;
3125 andi(scratch1, first, Operand(kFlatOneByteStringMask));
3126 andi(scratch2, second, Operand(kFlatOneByteStringMask));
3127 cmpi(scratch1, Operand(kFlatOneByteStringTag));
3128 bne(failure);
3129 cmpi(scratch2, Operand(kFlatOneByteStringTag));
3130 bne(failure);
3131 }
3132
3133
3134 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3135 Register scratch,
3136 Label* failure) {
3137 const int kFlatOneByteStringMask =
3138 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3139 const int kFlatOneByteStringTag =
3140 kStringTag | kOneByteStringTag | kSeqStringTag;
3141 andi(scratch, type, Operand(kFlatOneByteStringMask));
3142 cmpi(scratch, Operand(kFlatOneByteStringTag));
3143 bne(failure);
3144 }
3145
3146 static const int kRegisterPassedArguments = 8;
3147
3148
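// Computes how many words of C-call arguments must be passed on the stack:
// integer arguments beyond kRegisterPassedArguments plus double arguments
// beyond DoubleRegister::kNumRegisters (each excess double counted as two
// words here).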
3149 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3150 int num_double_arguments) {
3151 int stack_passed_words = 0;
3152 if (num_double_arguments > DoubleRegister::kNumRegisters) {
3153 stack_passed_words +=
3154 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3155 }
3156 // Up to 8 simple arguments are passed in registers r3..r10.
3157 if (num_reg_arguments > kRegisterPassedArguments) {
3158 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3159 }
3160 return stack_passed_words;
3161 }
3162
3163
3164 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
3165 Register value,
3166 uint32_t encoding_mask) {
3167 Label is_object;
3168 TestIfSmi(string, r0);
3169 Check(ne, kNonObject, cr0);
3170
3171 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3172 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3173
3174 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3175 cmpi(ip, Operand(encoding_mask));
3176 Check(eq, kUnexpectedStringType);
3177
3178 // The index is assumed to come in untagged. Tag it to compare against the
3179 // string length without using a temp register; it is untagged again at the
3180 // end of this function.
3181 #if !V8_TARGET_ARCH_PPC64
3182 Label index_tag_ok, index_tag_bad;
3183 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
3184 #endif
3185 SmiTag(index, index);
3186 #if !V8_TARGET_ARCH_PPC64
3187 b(&index_tag_ok);
3188 bind(&index_tag_bad);
3189 Abort(kIndexIsTooLarge);
3190 bind(&index_tag_ok);
3191 #endif
3192
3193 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
3194 cmp(index, ip);
3195 Check(lt, kIndexIsTooLarge);
3196
3197 DCHECK(Smi::kZero == 0);
3198 cmpi(index, Operand::Zero());
3199 Check(ge, kIndexIsNegative);
3200
3201 SmiUntag(index, index);
3202 }
3203
3204
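// Reserves the ABI-required stack frame slots plus room for any stack-passed
// arguments before a C call. When the ABI needs more than pointer alignment,
// sp is aligned and its original value is saved in the frame so that
// CallCFunctionHelper can restore it after the call.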
3205 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3206 int num_double_arguments,
3207 Register scratch) {
3208 int frame_alignment = ActivationFrameAlignment();
3209 int stack_passed_arguments =
3210 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3211 int stack_space = kNumRequiredStackFrameSlots;
3212
3213 if (frame_alignment > kPointerSize) {
3214 // Make the stack end at the alignment boundary and make room for stack
3215 // arguments, preserving the original value of sp.
3216 mr(scratch, sp);
3217 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
3218 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3219 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3220 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3221 } else {
3222 // Make room for stack arguments
3223 stack_space += stack_passed_arguments;
3224 }
3225
3226 // Allocate frame with required slots to make ABI work.
3227 li(r0, Operand::Zero());
3228 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
3229 }
3230
3231
3232 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3233 Register scratch) {
3234 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3235 }
3236
3237
3238 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
3239
3240
3241 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
3242
3243
3244 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3245 DoubleRegister src2) {
3246 if (src2.is(d1)) {
3247 DCHECK(!src1.is(d2));
3248 Move(d2, src2);
3249 Move(d1, src1);
3250 } else {
3251 Move(d1, src1);
3252 Move(d2, src2);
3253 }
3254 }
3255
3256
3257 void MacroAssembler::CallCFunction(ExternalReference function,
3258 int num_reg_arguments,
3259 int num_double_arguments) {
3260 mov(ip, Operand(function));
3261 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3262 }
3263
3264
3265 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3266 int num_double_arguments) {
3267 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3268 }
3269
3270
3271 void MacroAssembler::CallCFunction(ExternalReference function,
3272 int num_arguments) {
3273 CallCFunction(function, num_arguments, 0);
3274 }
3275
3276
3277 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3278 CallCFunction(function, num_arguments, 0);
3279 }
3280
3281
3282 void MacroAssembler::CallCFunctionHelper(Register function,
3283 int num_reg_arguments,
3284 int num_double_arguments) {
3285 DCHECK(has_frame());
3286
3287 // Just call directly. The function called cannot cause a GC, or
3288 // allow preemption, so the return address in the link register
3289 // stays correct.
3290 Register dest = function;
3291 if (ABI_USES_FUNCTION_DESCRIPTORS) {
3292 // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
3293 // aware of this descriptor and pick up the entry point and TOC from it.
3294 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
3295 LoadP(ip, MemOperand(function, 0));
3296 dest = ip;
3297 } else if (ABI_CALL_VIA_IP) {
3298 Move(ip, function);
3299 dest = ip;
3300 }
3301
3302 Call(dest);
3303
3304 // Remove the frame set up in PrepareCallCFunction.
3305 int stack_passed_arguments =
3306 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3307 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3308 if (ActivationFrameAlignment() > kPointerSize) {
3309 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3310 } else {
3311 addi(sp, sp, Operand(stack_space * kPointerSize));
3312 }
3313 }
3314
3315
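// Decodes the load offset from a constant pool access sequence whose first
// instruction word is already in 'result'. A regular access is a single load
// carrying the 16-bit offset; an overflow access is an addis/load pair whose
// immediates are combined into the full offset.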
3316 void MacroAssembler::DecodeConstantPoolOffset(Register result,
3317 Register location) {
3318 Label overflow_access, done;
3319 DCHECK(!AreAliased(result, location, r0));
3320
3321 // Determine constant pool access type
3322 // Caller has already placed the instruction word at location in result.
3323 ExtractBitRange(r0, result, 31, 26);
3324 cmpi(r0, Operand(ADDIS >> 26));
3325 beq(&overflow_access);
3326
3327 // Regular constant pool access
3328 // extract the load offset
3329 andi(result, result, Operand(kImm16Mask));
3330 b(&done);
3331
3332 bind(&overflow_access);
3333 // Overflow constant pool access
3334 // shift addis immediate
3335 slwi(r0, result, Operand(16));
3336 // sign-extend and add the load offset
3337 lwz(result, MemOperand(location, kInstrSize));
3338 extsh(result, result);
3339 add(result, r0, result);
3340
3341 bind(&done);
3342 }
3343
3344
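// Masks the object address down to its page (MemoryChunk) start, loads the
// page flags word and branches to condition_met if any of the mask bits are
// set (cc == ne) or if all of them are clear (cc == eq).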
3345 void MacroAssembler::CheckPageFlag(
3346 Register object,
3347 Register scratch, // scratch may be same register as object
3348 int mask, Condition cc, Label* condition_met) {
3349 DCHECK(cc == ne || cc == eq);
3350 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3351 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3352
3353 And(r0, scratch, Operand(mask), SetRC);
3354
3355 if (cc == ne) {
3356 bne(condition_met, cr0);
3357 }
3358 if (cc == eq) {
3359 beq(condition_met, cr0);
3360 }
3361 }
3362
3363
3364 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3365 Register scratch1, Label* on_black) {
3366 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
3367 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3368 }
3369
3370
3371 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3372 Register mask_scratch, Label* has_color,
3373 int first_bit, int second_bit) {
3374 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3375
3376 GetMarkBits(object, bitmap_scratch, mask_scratch);
3377
3378 Label other_color, word_boundary;
3379 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3380 // Test the first bit
3381 and_(r0, ip, mask_scratch, SetRC);
3382 b(first_bit == 1 ? eq : ne, &other_color, cr0);
3383 // Shift left 1
3384 // May need to load the next cell
3385 slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
3386 beq(&word_boundary, cr0);
3387 // Test the second bit
3388 and_(r0, ip, mask_scratch, SetRC);
3389 b(second_bit == 1 ? ne : eq, has_color, cr0);
3390 b(&other_color);
3391
3392 bind(&word_boundary);
3393 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3394 andi(r0, ip, Operand(1));
3395 b(second_bit == 1 ? ne : eq, has_color, cr0);
3396 bind(&other_color);
3397 }
3398
3399
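// Computes, for addr_reg, the marking bitmap cell address (bitmap_reg,
// addressed relative to MemoryChunk::kHeaderSize by callers) and a single-bit
// mask within that cell (mask_reg): the page start comes from masking the
// address, the cell index from the high address bits and the bit index from
// the low bits.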
3400 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3401 Register mask_reg) {
3402 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3403 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3404 lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
3405 and_(bitmap_reg, addr_reg, r0);
3406 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3407 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3408 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3409 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3410 add(bitmap_reg, bitmap_reg, ip);
3411 li(ip, Operand(1));
3412 slw(mask_reg, ip, mask_reg);
3413 }
3414
3415
3416 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
3417 Register mask_scratch, Register load_scratch,
3418 Label* value_is_white) {
3419 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3420 GetMarkBits(value, bitmap_scratch, mask_scratch);
3421
3422 // If the value is black or grey we don't need to do anything.
3423 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3424 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
3425 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
3426 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3427
3428 // Since both black and grey have a 1 in the first position and white does
3429 // not have a 1 there we only need to check one bit.
3430 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3431 and_(r0, mask_scratch, load_scratch, SetRC);
3432 beq(value_is_white, cr0);
3433 }
3434
3435
3436 // Saturate a value into an 8-bit unsigned integer.
3437 // if input_value < 0, output_value is 0
3438 // if input_value > 255, output_value is 255
3439 // otherwise output_value is the input_value
3440 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3441 int satval = (1 << 8) - 1;
3442
3443 if (CpuFeatures::IsSupported(ISELECT)) {
3444 // set to 0 if negative
3445 cmpi(input_reg, Operand::Zero());
3446 isel(lt, output_reg, r0, input_reg);
3447
3448 // set to satval if > satval
3449 li(r0, Operand(satval));
3450 cmpi(output_reg, Operand(satval));
3451 isel(lt, output_reg, output_reg, r0);
3452 } else {
3453 Label done, negative_label, overflow_label;
3454 cmpi(input_reg, Operand::Zero());
3455 blt(&negative_label);
3456
3457 cmpi(input_reg, Operand(satval));
3458 bgt(&overflow_label);
3459 if (!output_reg.is(input_reg)) {
3460 mr(output_reg, input_reg);
3461 }
3462 b(&done);
3463
3464 bind(&negative_label);
3465 li(output_reg, Operand::Zero()); // set to 0 if negative
3466 b(&done);
3467
3468 bind(&overflow_label); // set to satval if > satval
3469 li(output_reg, Operand(satval));
3470
3471 bind(&done);
3472 }
3473 }
3474
3475
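// mtfsfi on FPSCR field 7 updates the bits that hold the rounding mode (RN).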
3476 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3477
3478
3479 void MacroAssembler::ResetRoundingMode() {
3480 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3481 }
3482
3483
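// Clamps a double to [0, 255]: values not greater than zero (including NaN)
// produce 0, values above 255 produce 255, and in-range values are rounded to
// the nearest integer with fctiw under the default rounding mode.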
3484 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3485 DoubleRegister input_reg,
3486 DoubleRegister double_scratch) {
3487 Label above_zero;
3488 Label done;
3489 Label in_bounds;
3490
3491 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3492 fcmpu(input_reg, double_scratch);
3493 bgt(&above_zero);
3494
3495 // Double value is not greater than zero (or is NaN), return 0.
3496 LoadIntLiteral(result_reg, 0);
3497 b(&done);
3498
3499 // Double value is greater than zero; return 255 if it exceeds 255.
3500 bind(&above_zero);
3501 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3502 fcmpu(input_reg, double_scratch);
3503 ble(&in_bounds);
3504 LoadIntLiteral(result_reg, 255);
3505 b(&done);
3506
3507 // In 0-255 range, round and truncate.
3508 bind(&in_bounds);
3509
3510 // round to nearest (default rounding mode)
3511 fctiw(double_scratch, input_reg);
3512 MovDoubleLowToInt(result_reg, double_scratch);
3513 bind(&done);
3514 }
3515
3516
3517 void MacroAssembler::LoadInstanceDescriptors(Register map,
3518 Register descriptors) {
3519 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3520 }
3521
3522
3523 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3524 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3525 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3526 }
3527
3528
3529 void MacroAssembler::EnumLength(Register dst, Register map) {
3530 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3531 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3532 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3533 SmiTag(dst);
3534 }
3535
3536
3537 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3538 int accessor_index,
3539 AccessorComponent accessor) {
3540 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3541 LoadInstanceDescriptors(dst, dst);
3542 LoadP(dst,
3543 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3544 const int getterOffset = AccessorPair::kGetterOffset;
3545 const int setterOffset = AccessorPair::kSetterOffset;
3546 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3547 LoadP(dst, FieldMemOperand(dst, offset));
3548 }
3549
3550
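// Verifies that the object in r3 and its prototype chain can use cached enum
// data for for-in: the receiver's map must have a valid (non-sentinel) enum
// length, every other map's enum cache must be empty, and every object's
// elements must be the empty fixed array or the empty slow element
// dictionary. Jumps to call_runtime otherwise.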
3551 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3552 Register null_value = r8;
3553 Register empty_fixed_array_value = r9;
3554 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3555 Label next, start;
3556 mr(r5, r3);
3557
3558 // Check if the enum length field is properly initialized, indicating that
3559 // there is an enum cache.
3560 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3561
3562 EnumLength(r6, r4);
3563 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3564 beq(call_runtime);
3565
3566 LoadRoot(null_value, Heap::kNullValueRootIndex);
3567 b(&start);
3568
3569 bind(&next);
3570 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3571
3572 // For all objects but the receiver, check that the cache is empty.
3573 EnumLength(r6, r4);
3574 CmpSmiLiteral(r6, Smi::kZero, r0);
3575 bne(call_runtime);
3576
3577 bind(&start);
3578
3579 // Check that there are no elements. Register r5 contains the current JS
3580 // object we've reached through the prototype chain.
3581 Label no_elements;
3582 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
3583 cmp(r5, empty_fixed_array_value);
3584 beq(&no_elements);
3585
3586 // Second chance, the object may be using the empty slow element dictionary.
3587 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
3588 bne(call_runtime);
3589
3590 bind(&no_elements);
3591 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
3592 cmp(r5, null_value);
3593 bne(&next);
3594 }
3595
3596
3597 ////////////////////////////////////////////////////////////////////////////////
3598 //
3599 // New MacroAssembler Interfaces added for PPC
3600 //
3601 ////////////////////////////////////////////////////////////////////////////////
3602 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
3603 mov(dst, Operand(value));
3604 }
3605
3606
3607 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
3608 mov(dst, Operand(smi));
3609 }
3610
3611
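// Materializes a double constant in 'result'. When the embedded constant pool
// is available the value is loaded from the pool; otherwise its bit pattern is
// moved through 'scratch', either directly into the FPR when GPR<->FPR moves
// are supported or via a temporary stack slot.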
3612 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
3613 Register scratch) {
3614 if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
3615 !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) {
3616 ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
3617 if (access == ConstantPoolEntry::OVERFLOWED) {
3618 addis(scratch, kConstantPoolRegister, Operand::Zero());
3619 lfd(result, MemOperand(scratch, 0));
3620 } else {
3621 lfd(result, MemOperand(kConstantPoolRegister, 0));
3622 }
3623 return;
3624 }
3625
3626 // avoid gcc strict aliasing error using union cast
3627 union {
3628 double dval;
3629 #if V8_TARGET_ARCH_PPC64
3630 intptr_t ival;
3631 #else
3632 intptr_t ival[2];
3633 #endif
3634 } litVal;
3635
3636 litVal.dval = value;
3637
3638 #if V8_TARGET_ARCH_PPC64
3639 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3640 mov(scratch, Operand(litVal.ival));
3641 mtfprd(result, scratch);
3642 return;
3643 }
3644 #endif
3645
3646 addi(sp, sp, Operand(-kDoubleSize));
3647 #if V8_TARGET_ARCH_PPC64
3648 mov(scratch, Operand(litVal.ival));
3649 std(scratch, MemOperand(sp));
3650 #else
3651 LoadIntLiteral(scratch, litVal.ival[0]);
3652 stw(scratch, MemOperand(sp, 0));
3653 LoadIntLiteral(scratch, litVal.ival[1]);
3654 stw(scratch, MemOperand(sp, 4));
3655 #endif
3656 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3657 lfd(result, MemOperand(sp, 0));
3658 addi(sp, sp, Operand(kDoubleSize));
3659 }
3660
3661
3662 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
3663 Register scratch) {
3664 // sign-extend src to 64-bit
3665 #if V8_TARGET_ARCH_PPC64
3666 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3667 mtfprwa(dst, src);
3668 return;
3669 }
3670 #endif
3671
3672 DCHECK(!src.is(scratch));
3673 subi(sp, sp, Operand(kDoubleSize));
3674 #if V8_TARGET_ARCH_PPC64
3675 extsw(scratch, src);
3676 std(scratch, MemOperand(sp, 0));
3677 #else
3678 srawi(scratch, src, 31);
3679 stw(scratch, MemOperand(sp, Register::kExponentOffset));
3680 stw(src, MemOperand(sp, Register::kMantissaOffset));
3681 #endif
3682 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3683 lfd(dst, MemOperand(sp, 0));
3684 addi(sp, sp, Operand(kDoubleSize));
3685 }
3686
3687
3688 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
3689 Register scratch) {
3690 // zero-extend src to 64-bit
3691 #if V8_TARGET_ARCH_PPC64
3692 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3693 mtfprwz(dst, src);
3694 return;
3695 }
3696 #endif
3697
3698 DCHECK(!src.is(scratch));
3699 subi(sp, sp, Operand(kDoubleSize));
3700 #if V8_TARGET_ARCH_PPC64
3701 clrldi(scratch, src, Operand(32));
3702 std(scratch, MemOperand(sp, 0));
3703 #else
3704 li(scratch, Operand::Zero());
3705 stw(scratch, MemOperand(sp, Register::kExponentOffset));
3706 stw(src, MemOperand(sp, Register::kMantissaOffset));
3707 #endif
3708 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3709 lfd(dst, MemOperand(sp, 0));
3710 addi(sp, sp, Operand(kDoubleSize));
3711 }
3712
3713
3714 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
3715 #if !V8_TARGET_ARCH_PPC64
3716 Register src_hi,
3717 #endif
3718 Register src) {
3719 #if V8_TARGET_ARCH_PPC64
3720 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3721 mtfprd(dst, src);
3722 return;
3723 }
3724 #endif
3725
3726 subi(sp, sp, Operand(kDoubleSize));
3727 #if V8_TARGET_ARCH_PPC64
3728 std(src, MemOperand(sp, 0));
3729 #else
3730 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
3731 stw(src, MemOperand(sp, Register::kMantissaOffset));
3732 #endif
3733 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3734 lfd(dst, MemOperand(sp, 0));
3735 addi(sp, sp, Operand(kDoubleSize));
3736 }
3737
3738
3739 #if V8_TARGET_ARCH_PPC64
3740 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
3741 Register src_hi,
3742 Register src_lo,
3743 Register scratch) {
3744 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3745 sldi(scratch, src_hi, Operand(32));
3746 rldimi(scratch, src_lo, 0, 32);
3747 mtfprd(dst, scratch);
3748 return;
3749 }
3750
3751 subi(sp, sp, Operand(kDoubleSize));
3752 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
3753 stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
3754 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3755 lfd(dst, MemOperand(sp));
3756 addi(sp, sp, Operand(kDoubleSize));
3757 }
3758 #endif
3759
3760
3761 void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
3762 Register scratch) {
3763 #if V8_TARGET_ARCH_PPC64
3764 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3765 mffprd(scratch, dst);
3766 rldimi(scratch, src, 0, 32);
3767 mtfprd(dst, scratch);
3768 return;
3769 }
3770 #endif
3771
3772 subi(sp, sp, Operand(kDoubleSize));
3773 stfd(dst, MemOperand(sp));
3774 stw(src, MemOperand(sp, Register::kMantissaOffset));
3775 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3776 lfd(dst, MemOperand(sp));
3777 addi(sp, sp, Operand(kDoubleSize));
3778 }
3779
3780
3781 void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
3782 Register scratch) {
3783 #if V8_TARGET_ARCH_PPC64
3784 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3785 mffprd(scratch, dst);
3786 rldimi(scratch, src, 32, 0);
3787 mtfprd(dst, scratch);
3788 return;
3789 }
3790 #endif
3791
3792 subi(sp, sp, Operand(kDoubleSize));
3793 stfd(dst, MemOperand(sp));
3794 stw(src, MemOperand(sp, Register::kExponentOffset));
3795 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3796 lfd(dst, MemOperand(sp));
3797 addi(sp, sp, Operand(kDoubleSize));
3798 }
3799
3800
3801 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
3802 #if V8_TARGET_ARCH_PPC64
3803 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3804 mffprwz(dst, src);
3805 return;
3806 }
3807 #endif
3808
3809 subi(sp, sp, Operand(kDoubleSize));
3810 stfd(src, MemOperand(sp));
3811 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3812 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
3813 addi(sp, sp, Operand(kDoubleSize));
3814 }
3815
3816
3817 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
3818 #if V8_TARGET_ARCH_PPC64
3819 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3820 mffprd(dst, src);
3821 srdi(dst, dst, Operand(32));
3822 return;
3823 }
3824 #endif
3825
3826 subi(sp, sp, Operand(kDoubleSize));
3827 stfd(src, MemOperand(sp));
3828 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3829 lwz(dst, MemOperand(sp, Register::kExponentOffset));
3830 addi(sp, sp, Operand(kDoubleSize));
3831 }
3832
3833
3834 void MacroAssembler::MovDoubleToInt64(
3835 #if !V8_TARGET_ARCH_PPC64
3836 Register dst_hi,
3837 #endif
3838 Register dst, DoubleRegister src) {
3839 #if V8_TARGET_ARCH_PPC64
3840 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
3841 mffprd(dst, src);
3842 return;
3843 }
3844 #endif
3845
3846 subi(sp, sp, Operand(kDoubleSize));
3847 stfd(src, MemOperand(sp));
3848 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3849 #if V8_TARGET_ARCH_PPC64
3850 ld(dst, MemOperand(sp, 0));
3851 #else
3852 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
3853 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
3854 #endif
3855 addi(sp, sp, Operand(kDoubleSize));
3856 }
3857
3858
3859 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
3860 subi(sp, sp, Operand(kFloatSize));
3861 stw(src, MemOperand(sp, 0));
3862 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3863 lfs(dst, MemOperand(sp, 0));
3864 addi(sp, sp, Operand(kFloatSize));
3865 }
3866
3867
3868 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
3869 subi(sp, sp, Operand(kFloatSize));
3870 frsp(src, src);
3871 stfs(src, MemOperand(sp, 0));
3872 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
3873 lwz(dst, MemOperand(sp, 0));
3874 addi(sp, sp, Operand(kFloatSize));
3875 }
3876
3877
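// Add and the Cmp* helpers below use the immediate instruction form when the
// value fits into the 16-bit field and otherwise materialize it in 'scratch'.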
3878 void MacroAssembler::Add(Register dst, Register src, intptr_t value,
3879 Register scratch) {
3880 if (is_int16(value)) {
3881 addi(dst, src, Operand(value));
3882 } else {
3883 mov(scratch, Operand(value));
3884 add(dst, src, scratch);
3885 }
3886 }
3887
3888
3889 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
3890 CRegister cr) {
3891 intptr_t value = src2.immediate();
3892 if (is_int16(value)) {
3893 cmpi(src1, src2, cr);
3894 } else {
3895 mov(scratch, src2);
3896 cmp(src1, scratch, cr);
3897 }
3898 }
3899
3900
3901 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
3902 CRegister cr) {
3903 intptr_t value = src2.immediate();
3904 if (is_uint16(value)) {
3905 cmpli(src1, src2, cr);
3906 } else {
3907 mov(scratch, src2);
3908 cmpl(src1, scratch, cr);
3909 }
3910 }
3911
3912
3913 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
3914 CRegister cr) {
3915 intptr_t value = src2.immediate();
3916 if (is_int16(value)) {
3917 cmpwi(src1, src2, cr);
3918 } else {
3919 mov(scratch, src2);
3920 cmpw(src1, scratch, cr);
3921 }
3922 }
3923
3924
3925 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
3926 Register scratch, CRegister cr) {
3927 intptr_t value = src2.immediate();
3928 if (is_uint16(value)) {
3929 cmplwi(src1, src2, cr);
3930 } else {
3931 mov(scratch, src2);
3932 cmplw(src1, scratch, cr);
3933 }
3934 }
3935
3936
3937 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
3938 RCBit rc) {
3939 if (rb.is_reg()) {
3940 and_(ra, rs, rb.rm(), rc);
3941 } else {
3942 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
3943 andi(ra, rs, rb);
3944 } else {
3945 // mov handles the relocation.
3946 DCHECK(!rs.is(r0));
3947 mov(r0, rb);
3948 and_(ra, rs, r0, rc);
3949 }
3950 }
3951 }
3952
3953
3954 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
3955 if (rb.is_reg()) {
3956 orx(ra, rs, rb.rm(), rc);
3957 } else {
3958 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
3959 ori(ra, rs, rb);
3960 } else {
3961 // mov handles the relocation.
3962 DCHECK(!rs.is(r0));
3963 mov(r0, rb);
3964 orx(ra, rs, r0, rc);
3965 }
3966 }
3967 }
3968
3969
3970 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
3971 RCBit rc) {
3972 if (rb.is_reg()) {
3973 xor_(ra, rs, rb.rm(), rc);
3974 } else {
3975 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
3976 xori(ra, rs, rb);
3977 } else {
3978 // mov handles the relocation.
3979 DCHECK(!rs.is(r0));
3980 mov(r0, rb);
3981 xor_(ra, rs, r0, rc);
3982 }
3983 }
3984 }
3985
3986
3987 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
3988 CRegister cr) {
3989 #if V8_TARGET_ARCH_PPC64
3990 LoadSmiLiteral(scratch, smi);
3991 cmp(src1, scratch, cr);
3992 #else
3993 Cmpi(src1, Operand(smi), scratch, cr);
3994 #endif
3995 }
3996
3997
3998 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
3999 CRegister cr) {
4000 #if V8_TARGET_ARCH_PPC64
4001 LoadSmiLiteral(scratch, smi);
4002 cmpl(src1, scratch, cr);
4003 #else
4004 Cmpli(src1, Operand(smi), scratch, cr);
4005 #endif
4006 }
4007
4008
4009 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4010 Register scratch) {
4011 #if V8_TARGET_ARCH_PPC64
4012 LoadSmiLiteral(scratch, smi);
4013 add(dst, src, scratch);
4014 #else
4015 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
4016 #endif
4017 }
4018
4019
4020 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4021 Register scratch) {
4022 #if V8_TARGET_ARCH_PPC64
4023 LoadSmiLiteral(scratch, smi);
4024 sub(dst, src, scratch);
4025 #else
4026 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
4027 #endif
4028 }
4029
4030
4031 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
4032 Register scratch, RCBit rc) {
4033 #if V8_TARGET_ARCH_PPC64
4034 LoadSmiLiteral(scratch, smi);
4035 and_(dst, src, scratch, rc);
4036 #else
4037 And(dst, src, Operand(smi), rc);
4038 #endif
4039 }
4040
4041
4042 // Load a "pointer" sized value from the memory location
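// On PPC64 the ld instruction requires a 4-byte-aligned displacement, so a
// misaligned offset is handled by first rebasing into dst.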
4043 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4044 Register scratch) {
4045 int offset = mem.offset();
4046
4047 if (!is_int16(offset)) {
4048 /* cannot use d-form */
4049 DCHECK(!scratch.is(no_reg));
4050 mov(scratch, Operand(offset));
4051 LoadPX(dst, MemOperand(mem.ra(), scratch));
4052 } else {
4053 #if V8_TARGET_ARCH_PPC64
4054 int misaligned = (offset & 3);
4055 if (misaligned) {
4056 // adjust base to conform to offset alignment requirements
4057 // Todo: enhance to use scratch if dst is unsuitable
4058 DCHECK(!dst.is(r0));
4059 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4060 ld(dst, MemOperand(dst, (offset & ~3) + 4));
4061 } else {
4062 ld(dst, mem);
4063 }
4064 #else
4065 lwz(dst, mem);
4066 #endif
4067 }
4068 }
4069
4070 void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
4071 Register scratch) {
4072 int offset = mem.offset();
4073
4074 if (!is_int16(offset)) {
4075 /* cannot use d-form */
4076 DCHECK(!scratch.is(no_reg));
4077 mov(scratch, Operand(offset));
4078 LoadPUX(dst, MemOperand(mem.ra(), scratch));
4079 } else {
4080 #if V8_TARGET_ARCH_PPC64
4081 ldu(dst, mem);
4082 #else
4083 lwzu(dst, mem);
4084 #endif
4085 }
4086 }
4087
4088 // Store a "pointer" sized value to the memory location
4089 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4090 Register scratch) {
4091 int offset = mem.offset();
4092
4093 if (!is_int16(offset)) {
4094 /* cannot use d-form */
4095 DCHECK(!scratch.is(no_reg));
4096 mov(scratch, Operand(offset));
4097 StorePX(src, MemOperand(mem.ra(), scratch));
4098 } else {
4099 #if V8_TARGET_ARCH_PPC64
4100 int misaligned = (offset & 3);
4101 if (misaligned) {
4102 // adjust base to conform to offset alignment requirements
4103 // a suitable scratch is required here
4104 DCHECK(!scratch.is(no_reg));
4105 if (scratch.is(r0)) {
4106 LoadIntLiteral(scratch, offset);
4107 stdx(src, MemOperand(mem.ra(), scratch));
4108 } else {
4109 addi(scratch, mem.ra(), Operand((offset & 3) - 4));
4110 std(src, MemOperand(scratch, (offset & ~3) + 4));
4111 }
4112 } else {
4113 std(src, mem);
4114 }
4115 #else
4116 stw(src, mem);
4117 #endif
4118 }
4119 }
4120
4121 void MacroAssembler::StorePU(Register src, const MemOperand& mem,
4122 Register scratch) {
4123 int offset = mem.offset();
4124
4125 if (!is_int16(offset)) {
4126 /* cannot use d-form */
4127 DCHECK(!scratch.is(no_reg));
4128 mov(scratch, Operand(offset));
4129 StorePUX(src, MemOperand(mem.ra(), scratch));
4130 } else {
4131 #if V8_TARGET_ARCH_PPC64
4132 stdu(src, mem);
4133 #else
4134 stwu(src, mem);
4135 #endif
4136 }
4137 }
4138
4139 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
4140 Register scratch) {
4141 int offset = mem.offset();
4142
4143 if (!is_int16(offset)) {
4144 DCHECK(!scratch.is(no_reg));
4145 mov(scratch, Operand(offset));
4146 lwax(dst, MemOperand(mem.ra(), scratch));
4147 } else {
4148 #if V8_TARGET_ARCH_PPC64
4149 int misaligned = (offset & 3);
4150 if (misaligned) {
4151 // adjust base to conform to offset alignment requirements
4152 // Todo: enhance to use scratch if dst is unsuitable
4153 DCHECK(!dst.is(r0));
4154 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4155 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
4156 } else {
4157 lwa(dst, mem);
4158 }
4159 #else
4160 lwz(dst, mem);
4161 #endif
4162 }
4163 }
4164
4165
4166 // Variable length depending on whether offset fits into immediate field
4167 // MemOperand currently only supports d-form
4168 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
4169 Register scratch) {
4170 Register base = mem.ra();
4171 int offset = mem.offset();
4172
4173 if (!is_int16(offset)) {
4174 LoadIntLiteral(scratch, offset);
4175 lwzx(dst, MemOperand(base, scratch));
4176 } else {
4177 lwz(dst, mem);
4178 }
4179 }
4180
4181
4182 // Variable length depending on whether offset fits into immediate field
4183 // MemOperand currently only supports d-form
4184 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
4185 Register scratch) {
4186 Register base = mem.ra();
4187 int offset = mem.offset();
4188
4189 if (!is_int16(offset)) {
4190 LoadIntLiteral(scratch, offset);
4191 stwx(src, MemOperand(base, scratch));
4192 } else {
4193 stw(src, mem);
4194 }
4195 }
4196
4197
4198 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
4199 Register scratch) {
4200 int offset = mem.offset();
4201
4202 if (!is_int16(offset)) {
4203 DCHECK(!scratch.is(no_reg));
4204 mov(scratch, Operand(offset));
4205 lhax(dst, MemOperand(mem.ra(), scratch));
4206 } else {
4207 lha(dst, mem);
4208 }
4209 }
4210
4211
4212 // Variable length depending on whether offset fits into immediate field
4213 // MemOperand currently only supports d-form
4214 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
4215 Register scratch) {
4216 Register base = mem.ra();
4217 int offset = mem.offset();
4218
4219 if (!is_int16(offset)) {
4220 LoadIntLiteral(scratch, offset);
4221 lhzx(dst, MemOperand(base, scratch));
4222 } else {
4223 lhz(dst, mem);
4224 }
4225 }
4226
4227
4228 // Variable length depending on whether offset fits into immediate field
4229 // MemOperand currently only supports d-form
4230 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
4231 Register scratch) {
4232 Register base = mem.ra();
4233 int offset = mem.offset();
4234
4235 if (!is_int16(offset)) {
4236 LoadIntLiteral(scratch, offset);
4237 sthx(src, MemOperand(base, scratch));
4238 } else {
4239 sth(src, mem);
4240 }
4241 }
4242
4243
4244 // Variable length depending on whether offset fits into immediate field
4245 // MemOperand currently only supports d-form
4246 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
4247 Register scratch) {
4248 Register base = mem.ra();
4249 int offset = mem.offset();
4250
4251 if (!is_int16(offset)) {
4252 LoadIntLiteral(scratch, offset);
4253 lbzx(dst, MemOperand(base, scratch));
4254 } else {
4255 lbz(dst, mem);
4256 }
4257 }
4258
4259
4260 // Variable length depending on whether offset fits into immediate field
4261 // MemOperand currently only supports d-form
4262 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
4263 Register scratch) {
4264 Register base = mem.ra();
4265 int offset = mem.offset();
4266
4267 if (!is_int16(offset)) {
4268 LoadIntLiteral(scratch, offset);
4269 stbx(src, MemOperand(base, scratch));
4270 } else {
4271 stb(src, mem);
4272 }
4273 }
4274
4275
4276 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4277 Representation r, Register scratch) {
4278 DCHECK(!r.IsDouble());
4279 if (r.IsInteger8()) {
4280 LoadByte(dst, mem, scratch);
4281 extsb(dst, dst);
4282 } else if (r.IsUInteger8()) {
4283 LoadByte(dst, mem, scratch);
4284 } else if (r.IsInteger16()) {
4285 LoadHalfWordArith(dst, mem, scratch);
4286 } else if (r.IsUInteger16()) {
4287 LoadHalfWord(dst, mem, scratch);
4288 #if V8_TARGET_ARCH_PPC64
4289 } else if (r.IsInteger32()) {
4290 LoadWordArith(dst, mem, scratch);
4291 #endif
4292 } else {
4293 LoadP(dst, mem, scratch);
4294 }
4295 }
4296
4297
4298 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4299 Representation r, Register scratch) {
4300 DCHECK(!r.IsDouble());
4301 if (r.IsInteger8() || r.IsUInteger8()) {
4302 StoreByte(src, mem, scratch);
4303 } else if (r.IsInteger16() || r.IsUInteger16()) {
4304 StoreHalfWord(src, mem, scratch);
4305 #if V8_TARGET_ARCH_PPC64
4306 } else if (r.IsInteger32()) {
4307 StoreWord(src, mem, scratch);
4308 #endif
4309 } else {
4310 if (r.IsHeapObject()) {
4311 AssertNotSmi(src);
4312 } else if (r.IsSmi()) {
4313 AssertSmi(src);
4314 }
4315 StoreP(src, mem, scratch);
4316 }
4317 }
4318
4319
4320 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
4321 Register scratch) {
4322 Register base = mem.ra();
4323 int offset = mem.offset();
4324
4325 if (!is_int16(offset)) {
4326 mov(scratch, Operand(offset));
4327 lfdx(dst, MemOperand(base, scratch));
4328 } else {
4329 lfd(dst, mem);
4330 }
4331 }
4332
4333 void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
4334 Register scratch) {
4335 Register base = mem.ra();
4336 int offset = mem.offset();
4337
4338 if (!is_int16(offset)) {
4339 mov(scratch, Operand(offset));
4340 lfdux(dst, MemOperand(base, scratch));
4341 } else {
4342 lfdu(dst, mem);
4343 }
4344 }
4345
4346 void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
4347 Register scratch) {
4348 Register base = mem.ra();
4349 int offset = mem.offset();
4350
4351 if (!is_int16(offset)) {
4352 mov(scratch, Operand(offset));
4353 lfsx(dst, MemOperand(base, scratch));
4354 } else {
4355 lfs(dst, mem);
4356 }
4357 }
4358
4359 void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
4360 Register scratch) {
4361 Register base = mem.ra();
4362 int offset = mem.offset();
4363
4364 if (!is_int16(offset)) {
4365 mov(scratch, Operand(offset));
4366 lfsux(dst, MemOperand(base, scratch));
4367 } else {
4368 lfsu(dst, mem);
4369 }
4370 }
4371
4372 void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
4373 Register scratch) {
4374 Register base = mem.ra();
4375 int offset = mem.offset();
4376
4377 if (!is_int16(offset)) {
4378 mov(scratch, Operand(offset));
4379 stfdx(src, MemOperand(base, scratch));
4380 } else {
4381 stfd(src, mem);
4382 }
4383 }
4384
4385 void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
4386 Register scratch) {
4387 Register base = mem.ra();
4388 int offset = mem.offset();
4389
4390 if (!is_int16(offset)) {
4391 mov(scratch, Operand(offset));
4392 stfdux(src, MemOperand(base, scratch));
4393 } else {
4394 stfdu(src, mem);
4395 }
4396 }
4397
4398 void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
4399 Register scratch) {
4400 Register base = mem.ra();
4401 int offset = mem.offset();
4402
4403 if (!is_int16(offset)) {
4404 mov(scratch, Operand(offset));
4405 stfsx(src, MemOperand(base, scratch));
4406 } else {
4407 stfs(src, mem);
4408 }
4409 }
4410
4411 void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
4412 Register scratch) {
4413 Register base = mem.ra();
4414 int offset = mem.offset();
4415
4416 if (!is_int16(offset)) {
4417 mov(scratch, Operand(offset));
4418 stfsux(src, MemOperand(base, scratch));
4419 } else {
4420 stfsu(src, mem);
4421 }
4422 }
4423
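// Checks whether an AllocationMemento immediately follows the JSArray in
// receiver_reg. Jumps to no_memento_found if the object is not in new space
// or if the word after the array cannot safely be read (it would cross a page
// boundary or lie at or above the allocation top); otherwise compares the
// candidate memento map with allocation_memento_map and leaves the result in
// the condition register for the caller.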
4424 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4425 Register scratch_reg,
4426 Register scratch2_reg,
4427 Label* no_memento_found) {
4428 Label map_check;
4429 Label top_check;
4430 ExternalReference new_space_allocation_top_adr =
4431 ExternalReference::new_space_allocation_top_address(isolate());
4432 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
4433 const int kMementoLastWordOffset =
4434 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
4435 Register mask = scratch2_reg;
4436
4437 DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
4438
4439 // Bail out if the object is not in new space.
4440 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
4441
4442 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
4443 lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
4444 addi(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
4445
4446 // If the object is in new space, we need to check whether it is on the same
4447 // page as the current top.
4448 mov(ip, Operand(new_space_allocation_top_adr));
4449 LoadP(ip, MemOperand(ip));
4450 Xor(r0, scratch_reg, Operand(ip));
4451 and_(r0, r0, mask, SetRC);
4452 beq(&top_check, cr0);
4453 // The object is on a different page than allocation top. Bail out if the
4454 // object sits on the page boundary as no memento can follow and we cannot
4455 // touch the memory following it.
4456 xor_(r0, scratch_reg, receiver_reg);
4457 and_(r0, r0, mask, SetRC);
4458 bne(no_memento_found, cr0);
4459 // Continue with the actual map check.
4460 b(&map_check);
4461 // If top is on the same page as the current object, we need to check whether
4462 // we are below top.
4463 bind(&top_check);
4464 cmp(scratch_reg, ip);
4465 bge(no_memento_found);
4466 // Memento map check.
4467 bind(&map_check);
4468 LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
4469 Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
4470 r0);
4471 }
4472
4473 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4474 Register reg4, Register reg5,
4475 Register reg6) {
4476 RegList regs = 0;
4477 if (reg1.is_valid()) regs |= reg1.bit();
4478 if (reg2.is_valid()) regs |= reg2.bit();
4479 if (reg3.is_valid()) regs |= reg3.bit();
4480 if (reg4.is_valid()) regs |= reg4.bit();
4481 if (reg5.is_valid()) regs |= reg5.bit();
4482 if (reg6.is_valid()) regs |= reg6.bit();
4483
4484 const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
4485 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
4486 int code = config->GetAllocatableGeneralCode(i);
4487 Register candidate = Register::from_code(code);
4488 if (regs & candidate.bit()) continue;
4489 return candidate;
4490 }
4491 UNREACHABLE();
4492 return no_reg;
4493 }
4494
4495
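// Walks the prototype chain of 'object' and jumps to 'found' if any object on
// the chain has an instance type below JS_OBJECT_TYPE (e.g. a proxy or value
// wrapper) or uses DICTIONARY_ELEMENTS; falls through once the null prototype
// is reached.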
4496 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
4497 Register scratch0,
4498 Register scratch1,
4499 Label* found) {
4500 DCHECK(!scratch1.is(scratch0));
4501 Register current = scratch0;
4502 Label loop_again, end;
4503
4504 // Use scratch0 to hold the current object as we walk the chain.
4505 mr(current, object);
4506 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4507 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4508 CompareRoot(current, Heap::kNullValueRootIndex);
4509 beq(&end);
4510
4511 // Loop based on the map going up the prototype chain.
4512 bind(&loop_again);
4513 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4514
4515 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4516 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4517 lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
4518 cmpi(scratch1, Operand(JS_OBJECT_TYPE));
4519 blt(found);
4520
4521 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4522 DecodeField<Map::ElementsKindBits>(scratch1);
4523 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
4524 beq(found);
4525 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4526 CompareRoot(current, Heap::kNullValueRootIndex);
4527 bne(&loop_again);
4528
4529 bind(&end);
4530 }
4531
4532
4533 #ifdef DEBUG
4534 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
4535 Register reg5, Register reg6, Register reg7, Register reg8,
4536 Register reg9, Register reg10) {
4537 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
4538 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4539 reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
4540 reg10.is_valid();
4541
4542 RegList regs = 0;
4543 if (reg1.is_valid()) regs |= reg1.bit();
4544 if (reg2.is_valid()) regs |= reg2.bit();
4545 if (reg3.is_valid()) regs |= reg3.bit();
4546 if (reg4.is_valid()) regs |= reg4.bit();
4547 if (reg5.is_valid()) regs |= reg5.bit();
4548 if (reg6.is_valid()) regs |= reg6.bit();
4549 if (reg7.is_valid()) regs |= reg7.bit();
4550 if (reg8.is_valid()) regs |= reg8.bit();
4551 if (reg9.is_valid()) regs |= reg9.bit();
4552 if (reg10.is_valid()) regs |= reg10.bit();
4553 int n_of_non_aliasing_regs = NumRegs(regs);
4554
4555 return n_of_valid_regs != n_of_non_aliasing_regs;
4556 }
4557 #endif
4558
4559
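// CodePatcher points a fresh MacroAssembler at 'instructions' existing
// instructions starting at 'address' so they can be overwritten in place, and
// optionally flushes the instruction cache when it goes out of scope.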
4560 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
4561 FlushICache flush_cache)
4562 : address_(address),
4563 size_(instructions * Assembler::kInstrSize),
4564 masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
4565 flush_cache_(flush_cache) {
4566 // Create a new macro assembler pointing to the address of the code to patch.
4567 // The size is adjusted with kGap in order for the assembler to generate size
4568 // bytes of instructions without failing with buffer size constraints.
4569 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4570 }
4571
4572
4573 CodePatcher::~CodePatcher() {
4574 // Indicate that code has changed.
4575 if (flush_cache_ == FLUSH) {
4576 Assembler::FlushICache(masm_.isolate(), address_, size_);
4577 }
4578
4579 // Check that the code was patched as expected.
4580 DCHECK(masm_.pc_ == address_ + size_);
4581 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4582 }
4583
4584
4585 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
4586
4587
4588 void CodePatcher::EmitCondition(Condition cond) {
4589 Instr instr = Assembler::instr_at(masm_.pc_);
4590 switch (cond) {
4591 case eq:
4592 instr = (instr & ~kCondMask) | BT;
4593 break;
4594 case ne:
4595 instr = (instr & ~kCondMask) | BF;
4596 break;
4597 default:
4598 UNIMPLEMENTED();
4599 }
4600 masm_.emit(instr);
4601 }
4602
4603
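// Computes dividend / divisor truncated toward zero using multiplication by a
// precomputed magic number: the high word of the product is corrected by
// +/- dividend depending on the signs, arithmetically shifted, and the
// dividend's sign bit is added to round the result toward zero.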
4604 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
4605 int32_t divisor) {
4606 DCHECK(!dividend.is(result));
4607 DCHECK(!dividend.is(r0));
4608 DCHECK(!result.is(r0));
4609 base::MagicNumbersForDivision<uint32_t> mag =
4610 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
4611 mov(r0, Operand(mag.multiplier));
4612 mulhw(result, dividend, r0);
4613 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
4614 if (divisor > 0 && neg) {
4615 add(result, result, dividend);
4616 }
4617 if (divisor < 0 && !neg && mag.multiplier > 0) {
4618 sub(result, result, dividend);
4619 }
4620 if (mag.shift > 0) srawi(result, result, mag.shift);
4621 ExtractBit(r0, dividend, 31);
4622 add(result, result, r0);
4623 }
4624
4625 } // namespace internal
4626 } // namespace v8
4627
4628 #endif // V8_TARGET_ARCH_PPC
4629