/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h


#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

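// Fast path for single-character string access: bail out (via |failures|) unless |src| is a
// JSString cell with no rope fibers and a length of exactly one, then load its single 16-bit
// character into |dst|.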
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src), TrustedImmPtr(m_globalData->jsStringVPtr)));
    failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
    load16(MacroAssembler::Address(dst, 0), dst);
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

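// Emits a near call with no argument setup and records it in m_calls so the target address
// |function| can be linked in when the generated code is finalized.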
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

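// On assemblers with a constant pool (ARM traditional, SH4), an "uninterrupted sequence"
// reserves enough instruction and constant-pool space up front so the pool is not flushed
// in the middle of a sequence that will later be repatched.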
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
    JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
    UNUSED_PARAM(dst);
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* In several cases the uninterrupted sequence is larger than the maximum offset
     * required to patch that same sequence. E.g., if the last macroassembler instruction
     * in an uninterrupted sequence is a stub call, it emits store instruction(s) that
     * should not be included when calculating the length of the uninterrupted sequence.
     * So insnSpace and constSpace are upper limits rather than exact sizes.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
    JSInterfaceJIT::endUninterruptedSequence();
}

#endif

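// The helpers below abstract where each architecture keeps the return address: in a dedicated
// link/return-address register (ARM, SH4, MIPS) or on the machine stack (X86/X86_64). Callers
// use them to spill the return address and to restore it before returning.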
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}
#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif

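// Before calling into a JIT stub, point the first C argument register at the JITStackFrame on
// the machine stack and store the current call frame pointer into its callFrame slot.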
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}

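// Emits a jump that is taken when the cell in |reg| does not have the expected Structure.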
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

#if USE(JSVALUE32_64)

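// With the JSVALUE32_64 representation a JSValue occupies two 32-bit words in the register
// file: a tag and a payload. The load/store helpers below operate on those halves, addressing
// each word through tagFor()/payloadFor().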
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

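// The JIT keeps a one-entry cache recording which machine registers currently hold the tag and
// payload of a virtual register at the current bytecode offset, so back-to-back uses can skip
// redundant loads. A mapping is never established at a jump target (isLabeled), and unmap()
// invalidates it whenever one of the cached registers is reused.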
inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}

inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

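// If either op1 or op2 is a constant int32, stores its value in |constant|, stores the other
// operand's index in |op|, and returns true; otherwise returns false.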
ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads a virtual register from the register file (the call frame) into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
        bool atJumpTarget = false;
        while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
            if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
                atJumpTarget = true;
            ++m_jumpTargetsPosition;
        }

        if (!atJumpTarget) {
            // The value we want is already in the cached result register.
            if (dst != cachedResultRegister)
                move(cachedResultRegister, dst);
            killLastResultRegister();
            return;
        }
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

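// In the pointer-sized value representation a value is a JSCell when none of its tag bits are
// set, so the cell checks below reduce to a test against the tag mask.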
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif