/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

// Puts an argument onto the stack, as an argument to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(src, argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(Imm32(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(ImmPtr(value), argumentStackOffset);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}
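
// Illustrative only (hypothetical call site, not from this file): marshalling
// two stub arguments before calling a stub, assuming regT0 holds the first:
//
//     emitPutJITStubArg(regT0, 1);
//     emitPutJITStubArgConstant(42, 2);
//
// New code should use JITStubCall, which wraps this marshalling and the call.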

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if !USE(JSVALUE32_64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
    return nakedCall;
}
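
// The CallRecord remembers where the near call was emitted and which function
// it targets; the calls accumulated in m_calls are patched to their final
// addresses when the generated code is linked.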

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit.
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
}

#endif
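
// Sketch of intended usage (the space constants are illustrative, not defined
// in this file): a patchable sequence reserves instruction and constant-pool
// space up front so the assembler cannot flush a constant pool into the middle
// of it, and the end call asserts the layout matched:
//
//     beginUninterruptedSequence(opCallInstructionSpace, opCallConstantSpace);
//     // ... emit a fixed-layout sequence that patching code relies on ...
//     endUninterruptedSequence(opCallInstructionSpace, opCallConstantSpace);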

#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
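
// On ARM the return address lives in the link register, so it is shuffled via
// register moves; on x86/x86-64 it lives on the stack, so it is popped and
// pushed instead.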

#if USE(JIT_STUB_ARGUMENT_VA_LIST)
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
#endif

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}
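
// Typical hot/slow pattern (illustrative; expectedStructure is hypothetical):
// the hot path registers each guard with addSlowCase(), and the matching
// emitSlow_* generator links the guards in the same order with linkSlowCase():
//
//     // Hot path: bail out if the structure does not match.
//     addSlowCase(checkStructure(regT0, expectedStructure));
//     ...
//     // Slow path generator:
//     linkSlowCase(iter);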

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
    add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
    addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(ImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)));
}

#if USE(JSVALUE32_64)

inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
}

inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
{
    return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
}
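
// In the JSVALUE32_64 representation each 64-bit register slot splits into a
// 32-bit tag and a 32-bit payload, so a virtual register can be read as two
// independent 32-bit loads, e.g. (illustrative):
//
//     load32(tagFor(src), regT1);     // tag word
//     load32(payloadFor(src), regT0); // payload word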

inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // Avoid stomping the base register.
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
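
// emitLoad2 loads whichever operand is currently mapped first: loading the
// other operand could overwrite the mapped registers (unmapping them), which
// would turn the mapped operand's load back into two memory accesses.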

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
    if (!indexIsBool)
        store32(Imm32(0), payloadFor(index, callFrameRegister));
    store32(tag, tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}
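
// The emitStore{Int32,Cell,Bool} variants take an indexIs* flag so a caller
// that knows the slot already holds a value of the same type can skip the
// redundant tag (or payload) store.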

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeIndex)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeIndex)
            return true;
        if (jumpTarget > bytecodeIndex)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeIndex))
        return;

    m_mappedBytecodeIndex = bytecodeIndex;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeIndex = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeIndex != m_bytecodeIndex)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}
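
// map()/unmap() implement a one-entry cache: at most one virtual register is
// remembered as live in a (tag, payload) register pair for the current
// bytecode index. A subsequent load of that operand becomes a register move,
// and a write to either register drops the mapping. Nothing is cached at jump
// targets (see isLabeled()), since control can arrive there with other state.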

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    poke(payload, argumentStackOffset);
    poke(tag, argumentStackOffset + 1);
}

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue constant = m_codeBlock->getConstant(src);
        poke(Imm32(constant.payload()), argumentStackOffset);
        poke(Imm32(constant.tag()), argumentStackOffset + 1);
    } else {
        emitLoad(src, scratch1, scratch2);
        poke(scratch2, argumentStackOffset);
        poke(scratch1, argumentStackOffset + 1);
    }
}

#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads a value from the stack-frame register array into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The value we want is already in the cached result register.
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
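
// m_lastResultBytecodeRegister tracks which virtual register the previous op
// left in cachedResultRegister (eax on x86), so reading that register back can
// be a move instead of a stack load. The optimization is unsafe at jump
// targets, where other control flow may arrive, hence the atJumpTarget scan.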

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}
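
// A cell pointer has all of its tag bits clear, so "is a cell" is a single
// bit test: with JSVALUE64 the mask lives in tagMaskRegister; with the 32-bit
// immediate encoding it is the constant JSImmediate::TagMask. The OR trick in
// emitJumpIfBothJSCells works because a set tag bit in either value survives
// the OR and fails the cell test.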

#if USE(JSVALUE64)
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
{
    return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
    return branchTestPtr(Zero, reg, tagTypeNumberRegister);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        Register& inConstantPool = m_codeBlock->constantRegister(index);
        convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if !USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
#endif
}

ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
{
#if USE(JSVALUE64)
    UNUSED_PARAM(reg);
#else
    rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
#endif
}

// The operand is an int32_t; it must already have been zero-extended if the register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
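
// Resulting encodings (illustrative, per the two immediate schemes): with
// JSVALUE64 an int32 n becomes TagTypeNumber | n; with the 32-bit immediate
// encoding it becomes (n << 1) | TagTypeNumber, which is exactly what the
// sign-extend, add-to-self (shift left by one), and re-tag steps compute.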

ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
    or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}

/* Deprecated: Please use JITStubCall instead. */

// Puts a value from the stack-frame register array onto the stack, as an argument to a context threaded function.
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
    } else {
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
        poke(scratch, argumentStackOffset);
    }

    killLastResultRegister();
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif