// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_

#include "assembler.h"
#include "frames.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated object already tagged as a heap
  // object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1
};
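
// The flags form a bit set; a call site that needs more than one of them can
// combine the values and cast the result back to AllocationFlags.  An
// illustrative sketch only, not a declaration in this header:
//
//   AllocationFlags flags =
//       static_cast<AllocationFlags>(TAG_OBJECT | RESULT_CONTAINS_TOP);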


// Convenience for platform-independent signatures.  We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };


bool AreAliased(Register r1, Register r2, Register r3, Register r4);


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality.  In this case, it is the
  // responsibility of the caller to never invoke such a function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // ---------------------------------------------------------------------------
  // GC Support
  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be the object itself, but scratch will be
  // clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be the object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }

  // Check if an object has a given incremental marking color.  Also uses ecx!
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                Label::Distance has_color_distance,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black,
                   Label::Distance on_black_distance = Label::kFar);

  // Checks the color of an object.  If the object is already grey or black
  // then we just fall through, since it is already live.  If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through.  For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Label* object_is_white_and_not_data,
                      Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  The value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
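
  // A minimal usage sketch for the write barrier above.  The registers, the
  // masm pointer and JSObject::kPropertiesOffset are illustrative choices, and
  // kDontSaveFPRegs is assumed to be a SaveFPRegsMode value:
  //
  //   masm->mov(FieldOperand(edx, JSObject::kPropertiesOffset), eax);
  //   masm->RecordWriteField(edx, JSObject::kPropertiesOffset, eax, ecx,
  //                          kDontSaveFPRegs);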

  // As above, but the offset has the tag presubtracted.  For use with
  // Operand(reg, off).
  void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     save_fp,
                     remembered_set_action,
                     smi_check);
  }

  // Notify the garbage collector that we wrote a pointer into a fixed array.
  // |array| is the array being stored into, |value| is the object being
  // stored.  |index| is the array index represented as a Smi.  All registers
  // are clobbered by the operation.  RecordWriteArray filters out smis, so it
  // does not update the write barrier if the value is a smi.
  void RecordWriteArray(
      Register array,
      Register value,
      Register index,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For the page containing |object|, mark the region covering |address|
  // dirty.  |object| is the object being stored into, |value| is the object
  // being stored.  The address and value registers are clobbered by the
  // operation.  RecordWrite filters out smis, so it does not update the
  // write barrier if the value is a smi.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();
#endif

  // Enter a specific kind of exit frame.  Expects the number of
  // arguments in register eax and sets up the number of arguments in
  // register edi and the pointer to the first argument in register
  // esi.
  void EnterExitFrame(bool save_doubles);

  void EnterApiExitFrame(int argc);

  // Leave the current exit frame.  Expects the return value in
  // register eax:edx (untouched) and the pointer to the first
  // argument in register esi.
  void LeaveExitFrame(bool save_doubles);

  // Leave the current exit frame.  Expects the return value in
  // register eax (untouched).
  void LeaveApiExitFrame();

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the global context if the map in register
  // map_in_out is the cached Array map in the global context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the initial map for new Arrays from a JSFunction.
  void LoadInitialArrayMap(Register function_in,
                           Register scratch,
                           Register map_out);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function.  The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { pushad(); }
  void PopSafepointRegisters() { popad(); }
  // Store the value in register/immediate src in the safepoint
  // register stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void StoreToSafepointRegisterSlot(Register dst, Immediate src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  void LoadHeapObject(Register result, Handle<HeapObject> object);
  void PushHeapObject(Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      Set(result, Immediate(object));
    }
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Set up call kind marking in ecx.  The method takes ecx as an
  // explicit first parameter to make the code more readable at the
  // call sites.
  void SetCallKind(Register dst, CallKind kind);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind) {
    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
  }

  void InvokeCode(const Operand& code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  // Invoke the JavaScript function in the given register.  Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  // Invoke specified builtin JavaScript function.  Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Expression support
  void Set(Register dst, const Immediate& x);
  void Set(const Operand& dst, const Immediate& x);

  // Support for constant splitting.
  bool IsUnsafeImmediate(const Immediate& x);
  void SafeSet(Register dst, const Immediate& x);
  void SafePush(const Immediate& x);

  // Compare against a known root, e.g. undefined, null, true, ...
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);

  // Compare object type for heap object.
  // Incoming register is heap_object and outgoing register is map.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Label* fail,
                         Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiOnlyElements(Register map,
                                Label* fail,
                                Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements.  If it can, store it at the index specified by key in
  // the FastDoubleElements array elements, otherwise jump to fail.
  void StoreNumberToDoubleElements(Register maybe_number,
                                   Register elements,
                                   Register key,
                                   Register scratch1,
                                   XMMRegister scratch2,
                                   Label* fail,
                                   bool specialize_for_processor);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS.  FLAGS are set
  // with the result of the map compare.  If multiple map compares are
  // required, the compare sequence branches to early_success.
  void CompareMap(Register obj,
                  Handle<Map> map,
                  Label* early_success,
                  CompareMapMode mode = REQUIRE_EXACT_MAP);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not.  Skip the smi check if not required (object is known to be a
  // heap object).  If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type,
                CompareMapMode mode = REQUIRE_EXACT_MAP);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal.  Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string.  Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type.  The registers map and instance_type can be
  // the same, in which case it contains the instance type afterwards.  Either
  // of the registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // Check if a heap object's type is in the JSObject range, not including
  // JSFunction.  The object's map will be loaded in the map register.
  // Any or all of the three registers may be the same.
  // The contents of the scratch register will always be overwritten.
  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  // The contents of the scratch register will be overwritten.
  void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  void ClampUint8(Register reg);

  void ClampDoubleToUint8(XMMRegister input_reg,
                          XMMRegister scratch_reg,
                          Register result_reg);


  // Smi tagging support.
  void SmiTag(Register reg) {
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    add(reg, reg);
  }
  void SmiUntag(Register reg) {
    sar(reg, kSmiTagSize);
  }

  // Modifies the register even if it does not contain a Smi!
  void SmiUntag(Register reg, Label* is_smi) {
    STATIC_ASSERT(kSmiTagSize == 1);
    sar(reg, kSmiTagSize);
    STATIC_ASSERT(kSmiTag == 0);
    j(not_carry, is_smi);
  }
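
  // A minimal arithmetic sketch of the tagging scheme above (the masm pointer
  // and the choice of eax are illustrative only): with kSmiTag == 0 and
  // kSmiTagSize == 1, tagging doubles the value and untagging shifts it back.
  //
  //   masm->mov(eax, Immediate(5));
  //   masm->SmiTag(eax);    // eax == 10, i.e. 5 << 1
  //   masm->SmiUntag(eax);  // eax == 5 again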

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the operand is a smi.
  inline void JumpIfSmi(Operand value,
                        Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value,
                           Label* not_smi_label,
                           Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(not_zero, not_smi_label, distance);
  }
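
  // A sketch of the usual guard-then-untag pattern built from the helpers
  // above (label name and register choice are placeholders):
  //
  //   Label not_smi;
  //   masm->JumpIfNotSmi(eax, &not_smi);
  //   masm->SmiUntag(eax);  // safe here: eax is known to hold a smi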

  void LoadInstanceDescriptors(Register map, Register descriptors);

  void LoadPowerOf2(XMMRegister dst, Register scratch, int power);

  // Abort execution if argument is not a number.  Used in debug code.
  void AbortIfNotNumber(Register object);

  // Abort execution if argument is not a smi.  Used in debug code.
  void AbortIfNotSmi(Register object);

  // Abort execution if argument is a smi.  Used in debug code.
  void AbortIfSmi(Register object);

  // Abort execution if argument is not a string.  Used in debug code.
  void AbortIfNotString(Register object);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // Throw to the top handler in the try handler chain.
  void Throw(Register value);

  // Throw past all JS frames to the top JS entry frame.
  void ThrowUncatchable(Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments.  The holder register
  // is left untouched, but the scratch register is clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register r0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register r0,
                                Register r1,
                                Register r2,
                                Register result);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space.  If the new space is exhausted control
  // continues at the gc_required label.  The allocated object is returned in
  // result and the end of the new object is returned in result_end.  The
  // register scratch can be passed as no_reg, in which case an additional
  // object reference will be added to the reloc info.  The returned pointers
  // in result and result_end have not yet been tagged as heap objects.  If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace).  If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);
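
  // A minimal allocation sketch for the fixed-size overload above.  The
  // HeapNumber::kSize constant, the label and the registers are illustrative
  // placeholders, not part of this declaration:
  //
  //   Label gc_required;
  //   masm->AllocateInNewSpace(HeapNumber::kSize, eax, ebx, no_reg,
  //                            &gc_required, TAG_OBJECT);
  //   // eax now holds a tagged pointer to the uninitialized allocation.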

  // Undo allocation in new space.  The object passed and objects allocated
  // after it will no longer be allocated.  Make sure that no pointers are
  // left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value.  The
  // register scratch2 can be passed as no_reg; the others must be
  // valid registers.  Returns tagged pointer in result register, or
  // jumps to gc_required if new space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);

  // Allocate a sequential string.  All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateAsciiString(Register result,
                           int length,
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);

  // Allocate a raw cons string object.  Only the map field of the result is
  // initialized.
  void AllocateTwoByteConsString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // Allocate a raw sliced string object.  Only the map field of the result is
  // initialized.
  void AllocateTwoByteSlicedString(Register result,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateAsciiSlicedString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);

  // Copy memory, byte-by-byte, from source to destination.  Not optimized for
  // long or aligned copies.
  // The contents of length and scratch are destroyed.
  void CopyBytes(Register source,
                 Register destination,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values.  Fields starting at |start_offset|
  // up to but not including |end_offset| are overwritten with the value in
  // |filler|.  At the end of the loop, |start_offset| takes the value of
  // |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check a boolean-bit of a Smi field.
  void BooleanBitTest(Register object, int field_offset, int bit_index);

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get the function prototype of a function and put the value in
  // the result register.  Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail.  The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash.  Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.  Generate the code if necessary.
  void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);

  // Tail call a code stub (jump).  Generate the code if necessary.
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(ExternalReference ref, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Before calling a C-function from generated code, align arguments on the
  // stack.  After aligning the frame, arguments must be stored in esp[0],
  // esp[4], etc., not pushed.  The argument count assumes all arguments are
  // word sized.  Some compilers/platforms require the stack to be aligned
  // when calling C++ code.
  // Needs a scratch register to do some arithmetic.  This register will be
  // trashed.
  void PrepareCallCFunction(int num_arguments, Register scratch);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction.  The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
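
  // A two-argument C call sketched from the declarations above.  The external
  // reference, the argument registers and the scratch register are
  // placeholders:
  //
  //   masm->PrepareCallCFunction(2, ecx);
  //   masm->mov(Operand(esp, 0 * kPointerSize), eax);  // first argument
  //   masm->mov(Operand(esp, 1 * kPointerSize), edx);  // second argument
  //   masm->CallCFunction(some_function_reference, 2);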

  // Prepares stack to put arguments (aligns and so on).  Reserves
  // space for return value if needed (assumes the return value is a handle).
  // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
  // etc.  Saves context (esi).  If space was reserved for return value then
  // stores the pointer to the reserved slot into esi.
  void PrepareCallApiFunction(int argc);

  // Calls an API function.  Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions.  Clobbers ebx, edi and
  // caller-save registers.  Restores context.  On return removes
  // stack_space * kPointerSize (GCed).
  void CallApiFunctionAndReturn(Address function_address, int stack_space);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the esp register.
  void Drop(int element_count);

  void Call(Label* target) { call(target); }

  // Emit call to the code we are currently generating.
  void CallSelf() {
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    call(self, RelocInfo::CODE_TARGET);
  }

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Push a handle value.
  void Push(Handle<Object> handle) { push(Immediate(handle)); }

  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);
  void IncrementCounter(Condition cc, StatsCounter* counter, int value);
  void DecrementCounter(Condition cc, StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // String utilities.

  // Check whether the instance type represents a flat ASCII string.  Jump to
  // the label if not.  If the instance type can be scratched, specify the same
  // register for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
                                              Register scratch,
                                              Label* on_not_flat_ascii_string);

  // Checks if both objects are sequential ASCII strings, and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialAsciiStrings(Register object1,
                                           Register object2,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* on_not_flat_ascii_strings);

  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in eax and returns map with validated enum cache
  // in eax.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

 private:
  bool generating_stub_;
  bool allow_stub_calls_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      const Operand& code_operand,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      Label::Distance done_distance,
                      const CallWrapper& call_wrapper = NullCallWrapper(),
                      CallKind call_kind = CALL_AS_METHOD);

  void EnterExitFramePrologue();
  void EnterExitFrameEpilogue(int argc, bool save_doubles);

  void LeaveExitFrameEpilogue();

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);

  // Helper for PopHandleScope.  Allowed to perform a GC and returns
  // NULL if gc_allowed.  Does not perform a GC if !gc_allowed, and
  // possibly returns a failure object indicating an allocation failure.
  MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
                                                    Register scratch,
                                                    bool gc_allowed);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit.  Uses ecx as scratch and
  // leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions.  Compute a handler address and jump to
  // it.  See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for optimized frame
  // traversal.
  friend class OptimizedFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation.  When using the code patcher
// the exact number of bytes specified must be emitted.  It is not legal to
// emit relocation information.  If any of these constraints are violated it
// causes an assertion.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}
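
// A minimal usage sketch: heap object pointers carry kHeapObjectTag, so
// FieldOperand removes the tag when addressing a field.  HeapObject::kMapOffset
// and the registers are used purely as illustrative choices:
//
//   masm->mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));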


// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object,
                            Register index,
                            ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}


inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}


inline Operand GlobalObjectOperand() {
  return ContextOperand(esi, Context::GLOBAL_INDEX);
}
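
// An illustrative sketch of loading the global object from the current context
// (esi is the context register on ia32; the destination register is arbitrary):
//
//   masm->mov(eax, GlobalObjectOperand());
//   // equivalent to: masm->mov(eax, ContextOperand(esi, Context::GLOBAL_INDEX));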


// Generates an Operand for saving parameters after PrepareCallApiFunction.
Operand ApiParameterOperand(int index);


#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) {                                               \
    byte* ia32_coverage_function =                                        \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd();                                                       \
    masm->pushad();                                                       \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
    masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY);         \
    masm->pop(eax);                                                       \
    masm->popad();                                                        \
    masm->popfd();                                                        \
  }                                                                       \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_