1 /*
2  * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 #include "config.h"
27 #include "JIT.h"
28 
29 #if ENABLE(JIT)
30 
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
34 #include "JSArray.h"
35 #include "JSFunction.h"
36 #include "Interpreter.h"
37 #include "LinkBuffer.h"
38 #include "RepatchBuffer.h"
39 #include "ResultType.h"
40 #include "SamplingTool.h"
41 
42 #ifndef NDEBUG
43 #include <stdio.h>
44 #endif
45 
46 using namespace std;
47 
48 namespace JSC {
49 
50 #if USE(JSVALUE32_64)
51 
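// In the JSVALUE32_64 representation a JSValue is an 8-byte tag/payload pair, so the
// property-access code below moves values as two 32-bit halves (payload in regT0/regT2,
// tag in regT1/regT3) rather than as a single pointer-sized register.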
52 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
53 {
54     unsigned base = currentInstruction[1].u.operand;
55     unsigned property = currentInstruction[2].u.operand;
56     unsigned value = currentInstruction[3].u.operand;
57 
58     JITStubCall stubCall(this, cti_op_put_by_index);
59     stubCall.addArgument(base);
60     stubCall.addArgument(Imm32(property));
61     stubCall.addArgument(value);
62     stubCall.call();
63 }
64 
65 void JIT::emit_op_put_getter(Instruction* currentInstruction)
66 {
67     unsigned base = currentInstruction[1].u.operand;
68     unsigned property = currentInstruction[2].u.operand;
69     unsigned function = currentInstruction[3].u.operand;
70 
71     JITStubCall stubCall(this, cti_op_put_getter);
72     stubCall.addArgument(base);
73     stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
74     stubCall.addArgument(function);
75     stubCall.call();
76 }
77 
78 void JIT::emit_op_put_setter(Instruction* currentInstruction)
79 {
80     unsigned base = currentInstruction[1].u.operand;
81     unsigned property = currentInstruction[2].u.operand;
82     unsigned function = currentInstruction[3].u.operand;
83 
84     JITStubCall stubCall(this, cti_op_put_setter);
85     stubCall.addArgument(base);
86     stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
87     stubCall.addArgument(function);
88     stubCall.call();
89 }
90 
91 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
92 {
93     unsigned dst = currentInstruction[1].u.operand;
94     unsigned base = currentInstruction[2].u.operand;
95     unsigned property = currentInstruction[3].u.operand;
96 
97     JITStubCall stubCall(this, cti_op_del_by_id);
98     stubCall.addArgument(base);
99     stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
100     stubCall.call(dst);
101 }
102 
103 
104 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
105 
106 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
107 
108 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
109 void JIT::emit_op_method_check(Instruction*) {}
110 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
111 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
112 #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
113 #endif
114 
115 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
116 {
117     unsigned dst = currentInstruction[1].u.operand;
118     unsigned base = currentInstruction[2].u.operand;
119     unsigned property = currentInstruction[3].u.operand;
120 
121     JITStubCall stubCall(this, cti_op_get_by_val);
122     stubCall.addArgument(base);
123     stubCall.addArgument(property);
124     stubCall.call(dst);
125 }
126 
127 void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
128 {
129     ASSERT_NOT_REACHED();
130 }
131 
132 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
133 {
134     unsigned base = currentInstruction[1].u.operand;
135     unsigned property = currentInstruction[2].u.operand;
136     unsigned value = currentInstruction[3].u.operand;
137 
138     JITStubCall stubCall(this, cti_op_put_by_val);
139     stubCall.addArgument(base);
140     stubCall.addArgument(property);
141     stubCall.addArgument(value);
142     stubCall.call();
143 }
144 
145 void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
146 {
147     ASSERT_NOT_REACHED();
148 }
149 
150 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
151 {
152     int dst = currentInstruction[1].u.operand;
153     int base = currentInstruction[2].u.operand;
154     int ident = currentInstruction[3].u.operand;
155 
156     JITStubCall stubCall(this, cti_op_get_by_id_generic);
157     stubCall.addArgument(base);
158     stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
159     stubCall.call(dst);
160 
161     m_propertyAccessInstructionIndex++;
162 }
163 
164 void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
165 {
166     m_propertyAccessInstructionIndex++;
167     ASSERT_NOT_REACHED();
168 }
169 
170 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
171 {
172     int base = currentInstruction[1].u.operand;
173     int ident = currentInstruction[2].u.operand;
174     int value = currentInstruction[3].u.operand;
175 
176     JITStubCall stubCall(this, cti_op_put_by_id_generic);
177     stubCall.addArgument(base);
178     stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
179     stubCall.addArgument(value);
180     stubCall.call();
181 
182     m_propertyAccessInstructionIndex++;
183 }
184 
185 void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
186 {
187     m_propertyAccessInstructionIndex++;
188     ASSERT_NOT_REACHED();
189 }
190 
191 #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
192 
193 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
194 
195 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
196 
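// op_method_check caches a method lookup inline: the Structure of the base object, the
// Structure of its prototype, and the resolved function are planted as patchable
// constants, so a repeated o.f() call can skip the full get_by_id lookup as long as both
// structure checks still pass.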
197 void JIT::emit_op_method_check(Instruction* currentInstruction)
198 {
199     // Assert that the following instruction is a get_by_id.
200     ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
201 
202     currentInstruction += OPCODE_LENGTH(op_method_check);
203 
204     // Do the method check - check the object & its prototype's structure inline (this is the common case).
205     m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
206     MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
207 
208     int dst = currentInstruction[1].u.operand;
209     int base = currentInstruction[2].u.operand;
210 
211     emitLoad(base, regT1, regT0);
212     emitJumpSlowCaseIfNotJSCell(base, regT1);
213 
214     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
215     DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
216     Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
217 
218     // This will be relinked to load the function without doing a load.
219     DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
220     move(Imm32(JSValue::CellTag), regT1);
221     Jump match = jump();
222 
223     ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
224     ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
225     ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
226 
227     // Link the failure cases here.
228     structureCheck.link(this);
229     protoStructureCheck.link(this);
230 
231     // Do a regular(ish) get_by_id (the slow case will be linked to
232     // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
233     compileGetByIdHotPath();
234 
235     match.link(this);
236     emitStore(dst, regT1, regT0);
237     map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
238 
239     // We've already generated the following get_by_id, so make sure it's skipped over.
240     m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
241 }
242 
243 void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
244 {
245     currentInstruction += OPCODE_LENGTH(op_method_check);
246 
247     int dst = currentInstruction[1].u.operand;
248     int base = currentInstruction[2].u.operand;
249     int ident = currentInstruction[3].u.operand;
250 
251     compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
252 
253     // We've already generated the following get_by_id, so make sure it's skipped over.
254     m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
255 }
256 
257 #else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
258 
259 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
260 void JIT::emit_op_method_check(Instruction*) {}
261 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
262 
263 #endif
264 
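// Fast path for indexed reads from a JSArray: the property must be an int32, the base
// must be a cell with the JSArray vptr, and the index must be below m_fastAccessCutoff.
// Each vector slot is an 8-byte (payload, tag) pair, so the payload is read from the
// slot's offset and the tag from offset + 4.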
265 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
266 {
267     unsigned dst = currentInstruction[1].u.operand;
268     unsigned base = currentInstruction[2].u.operand;
269     unsigned property = currentInstruction[3].u.operand;
270 
271     emitLoad2(base, regT1, regT0, property, regT3, regT2);
272 
273     addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
274     emitJumpSlowCaseIfNotJSCell(base, regT1);
275     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
276     addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
277 
278     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
279     load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
280     load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
281     emitStore(dst, regT1, regT0);
282     map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
283 }
284 
285 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
286 {
287     unsigned dst = currentInstruction[1].u.operand;
288     unsigned base = currentInstruction[2].u.operand;
289     unsigned property = currentInstruction[3].u.operand;
290 
291     // The slow case that handles accesses to arrays (below) may jump back up to here.
292     Label callGetByValJITStub(this);
293 
294     linkSlowCase(iter); // property int32 check
295     linkSlowCaseIfNotJSCell(iter, base); // base cell check
296     linkSlowCase(iter); // base array check
297 
298     JITStubCall stubCall(this, cti_op_get_by_val);
299     stubCall.addArgument(base);
300     stubCall.addArgument(property);
301     stubCall.call(dst);
302 
303     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
304 
305     linkSlowCase(iter); // array fast cut-off check
306 
307     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
308     branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
309 
310     // Missed the fast region, but it is still in the vector.
311     load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
312     load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
313 
314     // FIXME: Maybe we can optimize this comparison to JSValue().
315     Jump skip = branch32(NotEqual, regT0, Imm32(0));
316     branch32(Equal, regT1, Imm32(JSValue::CellTag), callGetByValJITStub);
317 
318     skip.link(this);
319     emitStore(dst, regT1, regT0);
320 }
321 
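// Fast path for indexed writes to a JSArray: indices below m_fastAccessCutoff are stored
// directly. Indices that are merely within m_vectorLength are only handled inline when
// the slot already holds a value; a write to an empty slot (a hole) takes the slow case.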
322 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
323 {
324     unsigned base = currentInstruction[1].u.operand;
325     unsigned property = currentInstruction[2].u.operand;
326     unsigned value = currentInstruction[3].u.operand;
327 
328     emitLoad2(base, regT1, regT0, property, regT3, regT2);
329 
330     addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
331     emitJumpSlowCaseIfNotJSCell(base, regT1);
332     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
333     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
334 
335     Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
336 
337     // Check if the access is within the vector.
338     addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
339 
340     // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
341     // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
342     Jump skip = branch32(NotEqual, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::CellTag));
343     addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), Imm32(0)));
344     skip.link(this);
345 
346     inFastVector.link(this);
347 
348     emitLoad(value, regT1, regT0);
349     store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
350     store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
351 }
352 
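// Slow path for put_by_val: the first three slow cases (non-int32 property, non-cell base,
// non-array base) call the generic cti_op_put_by_val stub; the last two (index beyond the
// vector length, or a write to a hole) call cti_op_put_by_val_array, which handles writes
// past the fast region of the array's storage.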
353 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
354 {
355     unsigned base = currentInstruction[1].u.operand;
356     unsigned property = currentInstruction[2].u.operand;
357     unsigned value = currentInstruction[3].u.operand;
358 
359     linkSlowCase(iter); // property int32 check
360     linkSlowCaseIfNotJSCell(iter, base); // base cell check
361     linkSlowCase(iter); // base not array check
362 
363     JITStubCall stubPutByValCall(this, cti_op_put_by_val);
364     stubPutByValCall.addArgument(base);
365     stubPutByValCall.addArgument(property);
366     stubPutByValCall.addArgument(value);
367     stubPutByValCall.call();
368 
369     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
370 
371     // Slow cases for immediate int accesses to arrays.
372     linkSlowCase(iter); // in vector check
373     linkSlowCase(iter); // written to slot check
374 
375     JITStubCall stubCall(this, cti_op_put_by_val_array);
376     stubCall.addArgument(regT1, regT0);
377     stubCall.addArgument(regT2);
378     stubCall.addArgument(value);
379     stubCall.call();
380 }
381 
382 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
383 {
384     int dst = currentInstruction[1].u.operand;
385     int base = currentInstruction[2].u.operand;
386 
387     emitLoad(base, regT1, regT0);
388     emitJumpSlowCaseIfNotJSCell(base, regT1);
389     compileGetByIdHotPath();
390     emitStore(dst, regT1, regT0);
391     map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
392 }
393 
394 void JIT::compileGetByIdHotPath()
395 {
396     // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
397     // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
398     // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
399     // to jump back to if one of these trampolines finds a match.
400     Label hotPathBegin(this);
401     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
402     m_propertyAccessInstructionIndex++;
403 
404     DataLabelPtr structureToCompare;
405     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
406     addSlowCase(structureCheck);
407     ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
408     ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
409 
410     Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
411     Label externalLoadComplete(this);
412     ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
413     ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
414 
415     DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
416     ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
417     DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
418     ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
419 
420     Label putResult(this);
421     ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
422 }
423 
424 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
425 {
426     int dst = currentInstruction[1].u.operand;
427     int base = currentInstruction[2].u.operand;
428     int ident = currentInstruction[3].u.operand;
429 
430     compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
431 }
432 
433 void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
434 {
435     // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
436     // so that we only need to track one pointer into the slow case code - we track a pointer to the location
437     // of the call (which we can use to look up the patch information), but should an array-length or
438     // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
439     // the distance from the call to the head of the slow case.
440     linkSlowCaseIfNotJSCell(iter, base);
441     linkSlowCase(iter);
442 
443     Label coldPathBegin(this);
444 
445     JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
446     stubCall.addArgument(regT1, regT0);
447     stubCall.addArgument(ImmPtr(ident));
448     Call call = stubCall.call(dst);
449 
450     ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
451 
452     // Track the location of the call; this will be used to recover patch information.
453     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
454     m_propertyAccessInstructionIndex++;
455 }
456 
457 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
458 {
459     // In order to be able to patch both the Structure and the object offset, we store one pointer, 'hotPathBegin',
460     // to just after the arguments have been loaded into registers, and we generate code
461     // such that the Structure & offset are always at the same distance from this.
462 
463     int base = currentInstruction[1].u.operand;
464     int value = currentInstruction[3].u.operand;
465 
466     emitLoad2(base, regT1, regT0, value, regT3, regT2);
467 
468     emitJumpSlowCaseIfNotJSCell(base, regT1);
469 
470     Label hotPathBegin(this);
471     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
472     m_propertyAccessInstructionIndex++;
473 
474     // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
475     DataLabelPtr structureToCompare;
476     addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
477     ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
478 
479     // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
480     Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
481     Label externalLoadComplete(this);
482     ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
483     ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
484 
485     DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
486     DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
487     ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
488     ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
489 }
490 
491 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
492 {
493     int base = currentInstruction[1].u.operand;
494     int ident = currentInstruction[2].u.operand;
495 
496     linkSlowCaseIfNotJSCell(iter, base);
497     linkSlowCase(iter);
498 
499     JITStubCall stubCall(this, cti_op_put_by_id);
500     stubCall.addArgument(regT1, regT0);
501     stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
502     stubCall.addArgument(regT3, regT2);
503     Call call = stubCall.call();
504 
505     // Track the location of the call; this will be used to recover patch information.
506     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
507     m_propertyAccessInstructionIndex++;
508 }
509 
510 // Compile a store into an object's property storage.  May overwrite base.
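// If the Structure uses inline storage the property lives directly inside the JSObject
// (the offset is rebased against m_inlineStorage); otherwise the property array is reached
// through the m_externalStorage pointer, which is loaded over 'base'.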
511 void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
512 {
513     int offset = cachedOffset;
514     if (structure->isUsingInlineStorage())
515         offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) /  sizeof(Register);
516     else
517         loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
518     emitStore(offset, valueTag, valuePayload, base);
519 }
520 
521 // Compile a load from an object's property storage.  May overwrite base.
522 void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
523 {
524     int offset = cachedOffset;
525     if (structure->isUsingInlineStorage())
526         offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
527     else
528         loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
529     emitLoad(offset, resultTag, resultPayload, base);
530 }
531 
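// This overload reads from a prototype object known at compile time, so the address of its
// property storage can be baked into the generated code; the payload and tag are loaded as
// two 32-bit halves.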
532 void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
533 {
534     if (base->isUsingInlineStorage()) {
535         load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
536         load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
537         return;
538     }
539 
540     size_t offset = cachedOffset * sizeof(JSValue);
541 
542     PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
543     loadPtr(static_cast<void*>(protoPropertyStorage), temp);
544     load32(Address(temp, offset), resultPayload);
545     load32(Address(temp, offset + 4), resultTag);
546 }
547 
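// A put_by_id transition stub: check that the base still has the old Structure and that the
// prototype chain still matches the recorded StructureChain, grow the property storage if the
// new Structure needs more capacity, swap in the new Structure, store the value at the cached
// offset, and return. Any failed check tail-calls cti_op_put_by_id_fail.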
548 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
549 {
550     // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag.  The value can be found on the stack.
551 
552     JumpList failureCases;
553     failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
554 
555     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
556     failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(oldStructure)));
557 
558     // Verify that nothing in the prototype chain has a setter for this property.
559     for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
560         loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
561         loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
562         failureCases.append(branchPtr(NotEqual, regT2, ImmPtr(it->get())));
563     }
564 
565     // Reallocate property storage if needed.
566     Call callTarget;
567     bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
568     if (willNeedStorageRealloc) {
569         // This trampoline was called like a JIT stub; before we can call again we need to
570         // remove the return address from the stack, to prevent the stack from becoming misaligned.
571         preserveReturnAddressAfterCall(regT3);
572 
573         JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
574         stubCall.skipArgument(); // base
575         stubCall.skipArgument(); // ident
576         stubCall.skipArgument(); // value
577         stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
578         stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
579         stubCall.call(regT0);
580 
581         restoreReturnAddressBeforeReturn(regT3);
582     }
583 
584     sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
585     add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
586     storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
587 
588     load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
589     load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
590 
591     // Write the value
592     compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
593 
594     ret();
595 
596     ASSERT(!failureCases.empty());
597     failureCases.link(this);
598     restoreArgumentReferenceForTrampoline();
599     Call failureCall = tailRecursiveCall();
600 
601     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
602 
603     patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
604 
605     if (willNeedStorageRealloc) {
606         ASSERT(m_calls.size() == 1);
607         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
608     }
609 
610     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
611     stubInfo->stubRoutine = entryLabel;
612     RepatchBuffer repatchBuffer(m_codeBlock);
613     repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
614 }
615 
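// Patch a self-access get_by_id: rewrite the inline Structure compare and the two 32-bit
// displacement loads (payload, then tag) so that later hits on the same Structure read the
// property without calling a stub.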
616 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
617 {
618     RepatchBuffer repatchBuffer(codeBlock);
619 
620     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
621     // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
622     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
623 
624     int offset = sizeof(JSValue) * cachedOffset;
625 
626     // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
627     // and makes the subsequent load's offset automatically correct
628     if (structure->isUsingInlineStorage())
629         repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
630 
631     // Patch the offset into the property map to load from, then patch the Structure to look for.
632     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
633     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
634     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
635 }
636 
637 void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
638 {
639     RepatchBuffer repatchBuffer(codeBlock);
640 
641     ASSERT(!methodCallLinkInfo.cachedStructure);
642     methodCallLinkInfo.cachedStructure = structure;
643     structure->ref();
644 
645     Structure* prototypeStructure = proto->structure();
646     ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
647     methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
648     prototypeStructure->ref();
649 
650     repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
651     repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
652     repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
653     repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
654 
655     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
656 }
657 
658 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
659 {
660     RepatchBuffer repatchBuffer(codeBlock);
661 
662     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
663     // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
664     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
665 
666     int offset = sizeof(JSValue) * cachedOffset;
667 
668     // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
669     // and makes the subsequent load's offset automatically correct
670     if (structure->isUsingInlineStorage())
671         repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
672 
673     // Patch the offset into the property map to load from, then patch the Structure to look for.
674     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
675     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
676     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
677 }
678 
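// Build a stub servicing get_by_id of 'length' on a JSArray: check the vptr, load m_length
// from the storage, and bail to the slow case if the length does not fit in an int32; the
// result is returned as an Int32Tag/payload pair.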
679 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
680 {
681     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
682 
683     // regT0 holds a JSCell*
684 
685     // Check for array
686     Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
687 
688     // Checks out okay! - get the length from the storage
689     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
690     load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
691 
692     Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
693     move(regT2, regT0);
694     move(Imm32(JSValue::Int32Tag), regT1);
695     Jump success = jump();
696 
697     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
698 
699     // Use the patch information to link the failure cases back to the original slow case routine.
700     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
701     patchBuffer.link(failureCases1, slowCaseBegin);
702     patchBuffer.link(failureCases2, slowCaseBegin);
703 
704     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
705     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
706 
707     // Track the stub we have created so that it will be deleted later.
708     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
709     stubInfo->stubRoutine = entryLabel;
710 
711     // Finally patch the jump to slow case back in the hot path to jump here instead.
712     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
713     RepatchBuffer repatchBuffer(m_codeBlock);
714     repatchBuffer.relink(jumpLocation, entryLabel);
715 
716     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
717     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
718 }
719 
720 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
721 {
722     // regT0 holds a JSCell*
723 
724     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
725     // referencing the prototype object - let's speculatively load its table nice and early!)
726     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
727 
728     Jump failureCases1 = checkStructure(regT0, structure);
729 
730     // Check the prototype object's Structure had not changed.
731     Structure** prototypeStructureAddress = &(protoObject->m_structure);
732 #if PLATFORM(X86_64)
733     move(ImmPtr(prototypeStructure), regT3);
734     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
735 #else
736     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
737 #endif
738 
739     // Checks out okay! - getDirectOffset
740     compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
741 
742     Jump success = jump();
743 
744     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
745 
746     // Use the patch information to link the failure cases back to the original slow case routine.
747     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
748     patchBuffer.link(failureCases1, slowCaseBegin);
749     patchBuffer.link(failureCases2, slowCaseBegin);
750 
751     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
752     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
753 
754     // Track the stub we have created so that it will be deleted later.
755     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
756     stubInfo->stubRoutine = entryLabel;
757 
758     // Finally patch the jump to slow case back in the hot path to jump here instead.
759     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
760     RepatchBuffer repatchBuffer(m_codeBlock);
761     repatchBuffer.relink(jumpLocation, entryLabel);
762 
763     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
764     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
765 }
766 
767 
768 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
769 {
770     // regT0 holds a JSCell*
771 
772     Jump failureCase = checkStructure(regT0, structure);
773     compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
774     Jump success = jump();
775 
776     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
777 
778     // Use the patch information to link the failure cases back to the original slow case routine.
779     CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
780     if (!lastProtoBegin)
781         lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
782 
783     patchBuffer.link(failureCase, lastProtoBegin);
784 
785     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
786     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
787 
788     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
789 
790     structure->ref();
791     polymorphicStructures->list[currentIndex].set(entryLabel, structure);
792 
793     // Finally patch the jump to slow case back in the hot path to jump here instead.
794     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
795     RepatchBuffer repatchBuffer(m_codeBlock);
796     repatchBuffer.relink(jumpLocation, entryLabel);
797 }
798 
799 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
800 {
801     // regT0 holds a JSCell*
802 
803     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
804     // referencing the prototype object - let's speculatively load its table nice and early!)
805     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
806 
807     // Check that regT0 is an object of the right Structure.
808     Jump failureCases1 = checkStructure(regT0, structure);
809 
810     // Check the prototype object's Structure had not changed.
811     Structure** prototypeStructureAddress = &(protoObject->m_structure);
812 #if PLATFORM(X86_64)
813     move(ImmPtr(prototypeStructure), regT3);
814     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
815 #else
816     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
817 #endif
818 
819     compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
820 
821     Jump success = jump();
822 
823     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
824 
825     // Use the patch information to link the failure cases back to the original slow case routine.
826     CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
827     patchBuffer.link(failureCases1, lastProtoBegin);
828     patchBuffer.link(failureCases2, lastProtoBegin);
829 
830     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
831     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
832 
833     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
834 
835     structure->ref();
836     prototypeStructure->ref();
837     prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
838 
839     // Finally patch the jump to slow case back in the hot path to jump here instead.
840     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
841     RepatchBuffer repatchBuffer(m_codeBlock);
842     repatchBuffer.relink(jumpLocation, entryLabel);
843 }
844 
845 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
846 {
847     // regT0 holds a JSCell*
848 
849     ASSERT(count);
850 
851     JumpList bucketsOfFail;
852 
853     // Check that regT0 is an object of the right Structure.
854     bucketsOfFail.append(checkStructure(regT0, structure));
855 
856     Structure* currStructure = structure;
857     RefPtr<Structure>* chainEntries = chain->head();
858     JSObject* protoObject = 0;
859     for (unsigned i = 0; i < count; ++i) {
860         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
861         currStructure = chainEntries[i].get();
862 
863         // Check the prototype object's Structure had not changed.
864         Structure** prototypeStructureAddress = &(protoObject->m_structure);
865 #if PLATFORM(X86_64)
866         move(ImmPtr(currStructure), regT3);
867         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
868 #else
869         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
870 #endif
871     }
872     ASSERT(protoObject);
873 
874     compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
875     Jump success = jump();
876 
877     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
878 
879     // Use the patch information to link the failure cases back to the original slow case routine.
880     CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
881 
882     patchBuffer.link(bucketsOfFail, lastProtoBegin);
883 
884     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
885     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
886 
887     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
888 
889     // Track the stub we have created so that it will be deleted later.
890     structure->ref();
891     chain->ref();
892     prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
893 
894     // Finally patch the jump to slow case back in the hot path to jump here instead.
895     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
896     RepatchBuffer repatchBuffer(m_codeBlock);
897     repatchBuffer.relink(jumpLocation, entryLabel);
898 }
899 
900 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
901 {
902     // regT0 holds a JSCell*
903 
904     ASSERT(count);
905 
906     JumpList bucketsOfFail;
907 
908     // Check that regT0 is an object of the right Structure.
909     bucketsOfFail.append(checkStructure(regT0, structure));
910 
911     Structure* currStructure = structure;
912     RefPtr<Structure>* chainEntries = chain->head();
913     JSObject* protoObject = 0;
914     for (unsigned i = 0; i < count; ++i) {
915         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
916         currStructure = chainEntries[i].get();
917 
918         // Check the prototype object's Structure had not changed.
919         Structure** prototypeStructureAddress = &(protoObject->m_structure);
920 #if PLATFORM(X86_64)
921         move(ImmPtr(currStructure), regT3);
922         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
923 #else
924         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
925 #endif
926     }
927     ASSERT(protoObject);
928 
929     compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
930     Jump success = jump();
931 
932     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
933 
934     // Use the patch information to link the failure cases back to the original slow case routine.
935     patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
936 
937     // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
938     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
939 
940     // Track the stub we have created so that it will be deleted later.
941     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
942     stubInfo->stubRoutine = entryLabel;
943 
944     // Finally patch the jump to slow case back in the hot path to jump here instead.
945     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
946     RepatchBuffer repatchBuffer(m_codeBlock);
947     repatchBuffer.relink(jumpLocation, entryLabel);
948 
949     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
950     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
951 }
952 
953 /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
954 
955 #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
956 
957 #else // USE(JSVALUE32_64)
958 
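// In the other value representations a JSValue is held in a single register-sized word, so
// the implementations below move whole values with emitGetVirtualRegister /
// emitPutVirtualRegister instead of separate tag and payload halves.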
959 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
960 {
961     emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
962     emitJumpSlowCaseIfNotImmediateInteger(regT1);
963 #if USE(JSVALUE64)
964     // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
965     // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if the
966     // number was signed, since m_fastAccessCutoff is always less than intmax (since the total allocation
967     // size is always less than 4Gb).  As such zero extending will have been correct (and extending the value
968     // to 64-bits is necessary since it's used in the address calculation).  We zero extend rather than sign
969     // extend since it makes it easier to re-tag the value in the slow case.
970     zeroExtend32ToPtr(regT1, regT1);
971 #else
972     emitFastArithImmToInt(regT1);
973 #endif
974     emitJumpSlowCaseIfNotJSCell(regT0);
975     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
976 
977     // This is an array; get the m_storage pointer into regT2, then check if the index is below the fast cutoff
978     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
979     addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
980 
981     // Get the value from the vector
982     loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
983     emitPutVirtualRegister(currentInstruction[1].u.operand);
984 }
985 
986 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
987 {
988     emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
989     emitJumpSlowCaseIfNotImmediateInteger(regT1);
990 #if USE(JSVALUE64)
991     // See comment in op_get_by_val.
992     zeroExtend32ToPtr(regT1, regT1);
993 #else
994     emitFastArithImmToInt(regT1);
995 #endif
996     emitJumpSlowCaseIfNotJSCell(regT0);
997     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
998 
999     // This is an array; get the m_storage pointer into regT2, then check if the index is below the fast cutoff
1000     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
1001     Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
1002     // No; oh well, check if the access is within the vector - if so, we may still be okay.
1003     addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
1004 
1005     // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
1006     // FIXME: should be able to handle initial write to array; increment the number of items in the array, and potentially update fast access cutoff.
1007     addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
1008 
1009     // All good - put the value into the array.
1010     inFastVector.link(this);
1011     emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
1012     storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
1013 }
1014 
1015 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
1016 {
1017     JITStubCall stubCall(this, cti_op_put_by_index);
1018     stubCall.addArgument(currentInstruction[1].u.operand, regT2);
1019     stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
1020     stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1021     stubCall.call();
1022 }
1023 
1024 void JIT::emit_op_put_getter(Instruction* currentInstruction)
1025 {
1026     JITStubCall stubCall(this, cti_op_put_getter);
1027     stubCall.addArgument(currentInstruction[1].u.operand, regT2);
1028     stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
1029     stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1030     stubCall.call();
1031 }
1032 
1033 void JIT::emit_op_put_setter(Instruction* currentInstruction)
1034 {
1035     JITStubCall stubCall(this, cti_op_put_setter);
1036     stubCall.addArgument(currentInstruction[1].u.operand, regT2);
1037     stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
1038     stubCall.addArgument(currentInstruction[3].u.operand, regT2);
1039     stubCall.call();
1040 }
1041 
1042 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
1043 {
1044     JITStubCall stubCall(this, cti_op_del_by_id);
1045     stubCall.addArgument(currentInstruction[2].u.operand, regT2);
1046     stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
1047     stubCall.call(currentInstruction[1].u.operand);
1048 }
1049 
1050 
1051 #if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1052 
1053 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1054 
1055 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
1056 void JIT::emit_op_method_check(Instruction*) {}
1057 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
1058 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1059 #error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
1060 #endif
1061 
1062 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
1063 {
1064     unsigned resultVReg = currentInstruction[1].u.operand;
1065     unsigned baseVReg = currentInstruction[2].u.operand;
1066     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1067 
1068     emitGetVirtualRegister(baseVReg, regT0);
1069     JITStubCall stubCall(this, cti_op_get_by_id_generic);
1070     stubCall.addArgument(regT0);
1071     stubCall.addArgument(ImmPtr(ident));
1072     stubCall.call(resultVReg);
1073 
1074     m_propertyAccessInstructionIndex++;
1075 }
1076 
1077 void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
1078 {
1079     ASSERT_NOT_REACHED();
1080 }
1081 
1082 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
1083 {
1084     unsigned baseVReg = currentInstruction[1].u.operand;
1085     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
1086     unsigned valueVReg = currentInstruction[3].u.operand;
1087 
1088     emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
1089 
1090     JITStubCall stubCall(this, cti_op_put_by_id_generic);
1091     stubCall.addArgument(regT0);
1092     stubCall.addArgument(ImmPtr(ident));
1093     stubCall.addArgument(regT1);
1094     stubCall.call();
1095 
1096     m_propertyAccessInstructionIndex++;
1097 }
1098 
1099 void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
1100 {
1101     ASSERT_NOT_REACHED();
1102 }
1103 
1104 #else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1105 
1106 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1107 
1108 #if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1109 
1110 void JIT::emit_op_method_check(Instruction* currentInstruction)
1111 {
1112     // Assert that the following instruction is a get_by_id.
1113     ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
1114 
1115     currentInstruction += OPCODE_LENGTH(op_method_check);
1116     unsigned resultVReg = currentInstruction[1].u.operand;
1117     unsigned baseVReg = currentInstruction[2].u.operand;
1118     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1119 
1120     emitGetVirtualRegister(baseVReg, regT0);
1121 
1122     // Do the method check - check the object & its prototype's structure inline (this is the common case).
1123     m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
1124     MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
1125     Jump notCell = emitJumpIfNotJSCell(regT0);
1126     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
1127     DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
1128     Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
1129 
1130     // This move will be repatched to produce the cached function directly, without doing a load.
1131     DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
1132     Jump match = jump();
1133 
1134     ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
1135     ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
1136     ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
1137 
1138     // Link the failure cases here.
1139     notCell.link(this);
1140     structureCheck.link(this);
1141     protoStructureCheck.link(this);
1142 
1143     // Do a regular(ish) get_by_id (the slow case will be linked to
1144     // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
1145     compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
1146 
1147     match.link(this);
1148     emitPutVirtualRegister(resultVReg);
1149 
1150     // We've already generated the following get_by_id, so make sure it's skipped over.
1151     m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
1152 }
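// A sketch of the inline method-check fast path generated above. The three patchable
// constants start out as bogus values (patchGetByIdDefaultStructure / 0), so the checks
// initially fail and we fall through to the ordinary get_by_id. Once patchMethodCallProto()
// (below) has filled them in, the common case becomes, roughly:
//
//     if (base is a cell
//         && base->m_structure == <patched Structure>
//         && <patched prototype>->m_structure == <patched prototype Structure>)
//         result = <patched JSFunction*>;   // no property load needed
//     else
//         result = get_by_id(base, ident);  // regular(ish) lookup, slow case routed
//                                           // to cti_op_get_by_id_method_check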
1153 
1154 void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1155 {
1156     currentInstruction += OPCODE_LENGTH(op_method_check);
1157     unsigned resultVReg = currentInstruction[1].u.operand;
1158     unsigned baseVReg = currentInstruction[2].u.operand;
1159     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1160 
1161     compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
1162 
1163     // We've already generated the following get_by_id, so make sure it's skipped over.
1164     m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
1165 }
1166 
1167 #else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
1168 
1169 // Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
1170 void JIT::emit_op_method_check(Instruction*) {}
1171 void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
1172 
1173 #endif
1174 
1175 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
1176 {
1177     unsigned resultVReg = currentInstruction[1].u.operand;
1178     unsigned baseVReg = currentInstruction[2].u.operand;
1179     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1180 
1181     emitGetVirtualRegister(baseVReg, regT0);
1182     compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
1183     emitPutVirtualRegister(resultVReg);
1184 }
1185 
1186 void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
1187 {
1188     // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
1189     // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
1190     // to the array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
1191     // to jump back to if one of these trampolines finds a match.
1192 
1193     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
1194 
1195     Label hotPathBegin(this);
1196     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1197 
1198     DataLabelPtr structureToCompare;
1199     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
1200     addSlowCase(structureCheck);
1201     ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
1202     ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
1203 
1204     Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1205     Label externalLoadComplete(this);
1206     ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
1207     ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
1208 
1209     DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
1210     ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
1211 
1212     Label putResult(this);
1213     ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
1214 }
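// The hot path above is laid out so that every patchable element lies at a fixed,
// asserted distance (the patchOffsetGetById* constants) from hotPathBegin. Roughly:
//
//     hotPathBegin:
//         compare base->m_structure against a patchable Structure*    ; ...GetByIdStructure
//         branch to the slow case on mismatch                         ; ...GetByIdBranchToSlowCase
//         load m_externalStorage (repatched to a lea for inline storage)
//         load from a patchable displacement into the result register ; ...GetByIdPropertyMapOffset
//     putResult:                                                      ; ...GetByIdPutResult
//
// Stubs such as the array-length and prototype-access trampolines jump back to putResult
// on success, so the store of the result still happens in the hot path.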
1215 
1216 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1217 {
1218     unsigned resultVReg = currentInstruction[1].u.operand;
1219     unsigned baseVReg = currentInstruction[2].u.operand;
1220     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
1221 
1222     compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
1223 }
1224 
1225 void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
1226 {
1227     // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
1228     // so that we only need to track one pointer into the slow case code - we track a pointer to the location
1229     // of the call (which we can use to look up the patch information), but should an array-length or
1230     // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
1231     // the distance from the call to the head of the slow case.
1232 
1233     linkSlowCaseIfNotJSCell(iter, baseVReg);
1234     linkSlowCase(iter);
1235 
1236 #ifndef NDEBUG
1237     Label coldPathBegin(this);
1238 #endif
1239     JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
1240     stubCall.addArgument(regT0);
1241     stubCall.addArgument(ImmPtr(ident));
1242     Call call = stubCall.call(resultVReg);
1243 
1244     ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
1245 
1246     // Track the location of the call; this will be used to recover patch information.
1247     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
1248     m_propertyAccessInstructionIndex++;
1249 }
1250 
1251 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
1252 {
1253     unsigned baseVReg = currentInstruction[1].u.operand;
1254     unsigned valueVReg = currentInstruction[3].u.operand;
1255 
1256     unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
1257 
1258     // In order to be able to patch both the Structure and the object offset, we store one pointer ('hotPathBegin')
1259     // to just after the point where the arguments have been loaded into registers, and we generate code
1260     // such that the Structure & offset are always at the same distance from it.
1261 
1262     emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
1263 
1264     // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
1265     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
1266 
1267     Label hotPathBegin(this);
1268     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
1269 
1270     // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
1271     DataLabelPtr structureToCompare;
1272     addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
1273     ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
1274 
1275     // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
1276     Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
1277     Label externalLoadComplete(this);
1278     ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
1279     ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
1280 
1281     DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
1282     ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
1283 }
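// The put_by_id hot path mirrors the get_by_id layout: a patchable Structure compare,
// an external-storage load that can be repatched to a lea for inline storage, and a
// store through a patchable displacement, all at asserted distances (patchOffsetPutById*)
// from hotPathBegin so that patchPutByIdReplace() can locate and rewrite them later.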
1284 
1285 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1286 {
1287     unsigned baseVReg = currentInstruction[1].u.operand;
1288     Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
1289 
1290     unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
1291 
1292     linkSlowCaseIfNotJSCell(iter, baseVReg);
1293     linkSlowCase(iter);
1294 
1295     JITStubCall stubCall(this, cti_op_put_by_id);
1296     stubCall.addArgument(regT0);
1297     stubCall.addArgument(ImmPtr(ident));
1298     stubCall.addArgument(regT1);
1299     Call call = stubCall.call();
1300 
1301     // Track the location of the call; this will be used to recover patch information.
1302     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
1303 }
1304 
1305 // Compile a store into an object's property storage.  May overwrite the
1306 // value in the base register.
1307 void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
1308 {
1309     int offset = cachedOffset * sizeof(JSValue);
1310     if (structure->isUsingInlineStorage())
1311         offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
1312     else
1313         loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
1314     storePtr(value, Address(base, offset));
1315 }
1316 
1317 // Compile a load from an object's property storage.  May overwrite base.
1318 void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
1319 {
1320     int offset = cachedOffset * sizeof(JSValue);
1321     if (structure->isUsingInlineStorage())
1322         offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
1323     else
1324         loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
1325     loadPtr(Address(base, offset), result);
1326 }
1327 
1328 void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
1329 {
1330     if (base->isUsingInlineStorage())
1331         loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
1332     else {
1333         PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
1334         loadPtr(static_cast<void*>(protoPropertyStorage), temp);
1335         loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
1336     }
1337 }
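// A worked example of the offset arithmetic shared by the three helpers above
// (illustrative; assuming 8-byte JSValue slots): cachedOffset == 2 gives a byte offset
// of 16. With inline storage the access is at
//     base + OBJECT_OFFSETOF(JSObject, m_inlineStorage) + 16
// while with external storage m_externalStorage is loaded first and the access is made
// at offset 16 from that pointer (which is why the base/temp register may be clobbered).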
1338 
1339 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
1340 {
1341     JumpList failureCases;
1342     // Check eax is an object of the right Structure.
1343     failureCases.append(emitJumpIfNotJSCell(regT0));
1344     failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
1345     JumpList successCases;
1346 
1347     // regT2 = baseObject->structure()
1348     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
1349     // Check that the base is an object type; its prototype is loaded next.
1350     failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
1351 
1352     loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
1353 
1354     // Walk the prototype chain, checking that each Structure along it is unchanged.
1355     for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
1356         // null check the prototype
1357         successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));
1358 
1359         // Check the structure id
1360         failureCases.append(branchPtr(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(it->get())));
1361 
1362         loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
1363         failureCases.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
1364         loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
1365     }
1366 
1367     successCases.link(this);
1368 
1369     Call callTarget;
1370 
1371     // emit a call only if storage realloc is needed
1372     bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
1373     if (willNeedStorageRealloc) {
1374         // This trampoline is called like a JIT stub; before we can call again we need to
1375         // remove the return address from the stack, to prevent the stack from becoming misaligned.
1376         preserveReturnAddressAfterCall(regT3);
1377 
1378         JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
1379         stubCall.skipArgument(); // base
1380         stubCall.skipArgument(); // ident
1381         stubCall.skipArgument(); // value
1382         stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
1383         stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
1384         stubCall.call(regT0);
1385         emitGetJITStubArg(3, regT1);
1386 
1387         restoreReturnAddressBeforeReturn(regT3);
1388     }
1389 
1390     // Decrementing the old Structure's refcount directly is safe because the
1391     // CodeBlock ensures oldStructure->m_refCount > 0.
1392     sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
1393     add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
1394     storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
1395 
1396     // write the value
1397     compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
1398 
1399     ret();
1400 
1401     ASSERT(!failureCases.empty());
1402     failureCases.link(this);
1403     restoreArgumentReferenceForTrampoline();
1404     Call failureCall = tailRecursiveCall();
1405 
1406     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1407 
1408     patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
1409 
1410     if (willNeedStorageRealloc) {
1411         ASSERT(m_calls.size() == 1);
1412         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
1413     }
1414 
1415     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1416     stubInfo->stubRoutine = entryLabel;
1417     RepatchBuffer repatchBuffer(m_codeBlock);
1418     repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
1419 }
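// Outline of the transition stub generated above: check the old Structure and that every
// Structure on the prototype chain is unchanged; if the property storage must grow, call
// cti_op_put_by_id_transition_realloc (popping and restoring the return address so the
// nested stub call sees a correctly aligned stack); then move a refcount from the old
// Structure to the new one, install the new Structure pointer, and store the value at the
// cached offset. Any failed check tail-calls into cti_op_put_by_id_fail.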
1420 
1421 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
1422 {
1423     RepatchBuffer repatchBuffer(codeBlock);
1424 
1425     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1426     // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
1427     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
1428 
1429     int offset = sizeof(JSValue) * cachedOffset;
1430 
1431     // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
1432     // and makes the subsequent load's offset automatically correct
1433     if (structure->isUsingInlineStorage())
1434         repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
1435 
1436     // Patch the offset into the property map to load from, then patch the Structure to look for.
1437     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
1438     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
1439 }
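// Illustrative example of a self-access patch: for cachedOffset == 3 and 8-byte JSValue
// slots, the code above rewrites the hot path's Structure immediate to the cached
// Structure* and the displacement to 24; for inline storage it also converts the initial
// external-storage load into a lea, so the displacement applies directly to the object.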
1440 
1441 void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
1442 {
1443     RepatchBuffer repatchBuffer(codeBlock);
1444 
1445     ASSERT(!methodCallLinkInfo.cachedStructure);
1446     methodCallLinkInfo.cachedStructure = structure;
1447     structure->ref();
1448 
1449     Structure* prototypeStructure = proto->structure();
1450     ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
1451     methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
1452     prototypeStructure->ref();
1453 
1454     repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
1455     repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
1456     repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
1457     repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
1458 
1459     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
1460 }
1461 
1462 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
1463 {
1464     RepatchBuffer repatchBuffer(codeBlock);
1465 
1466     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
1467     // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
1468     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
1469 
1470     int offset = sizeof(JSValue) * cachedOffset;
1471 
1472     // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
1473     // and makes the subsequent load's offset automatically correct
1474     if (structure->isUsingInlineStorage())
1475         repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
1476 
1477     // Patch the offset into the property map to load from, then patch the Structure to look for.
1478     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
1479     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
1480 }
1481 
1482 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
1483 {
1484     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
1485 
1486     // Check eax is an array
1487     Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
1488 
1489     // Checks out okay! - get the length from the storage
1490     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
1491     load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
1492 
1493     Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
1494 
1495     emitFastArithIntToImmNoCheck(regT2, regT0);
1496     Jump success = jump();
1497 
1498     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1499 
1500     // Use the patch information to link the failure cases back to the original slow case routine.
1501     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
1502     patchBuffer.link(failureCases1, slowCaseBegin);
1503     patchBuffer.link(failureCases2, slowCaseBegin);
1504 
1505     // On success, return to the hot path code, at a point where it will perform the store to dest for us.
1506     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1507 
1508     // Track the stub we have created so that it will be deleted later.
1509     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1510     stubInfo->stubRoutine = entryLabel;
1511 
1512     // Finally patch the jump to slow case back in the hot path to jump here instead.
1513     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1514     RepatchBuffer repatchBuffer(m_codeBlock);
1515     repatchBuffer.relink(jumpLocation, entryLabel);
1516 
1517     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1518     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
1519 }
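// The array-length stub built above checks the vptr against jsArrayVPtr, loads
// ArrayStorage::m_length, and converts it to an immediate integer, bailing back to the
// original slow case if the length exceeds JSImmediate::maxImmediateInt. On success it
// jumps straight back into the hot path at patchOffsetGetByIdPutResult, which performs
// the store to the destination register for us; the hot path's slow-case branch is then
// relinked so future misses enter this stub first.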
1520 
1521 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
1522 {
1523     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
1524     // referencing the prototype object - let's speculatively load its table nice and early!)
1525     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
1526 
1527     // Check eax is an object of the right Structure.
1528     Jump failureCases1 = checkStructure(regT0, structure);
1529 
1530     // Check that the prototype object's Structure has not changed.
1531     Structure** prototypeStructureAddress = &(protoObject->m_structure);
1532 #if PLATFORM(X86_64)
1533     move(ImmPtr(prototypeStructure), regT3);
1534     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
1535 #else
1536     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
1537 #endif
1538 
1539     // Checks out okay! - getDirectOffset
1540     compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1541 
1542     Jump success = jump();
1543 
1544     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1545 
1546     // Use the patch information to link the failure cases back to the original slow case routine.
1547     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
1548     patchBuffer.link(failureCases1, slowCaseBegin);
1549     patchBuffer.link(failureCases2, slowCaseBegin);
1550 
1551     // On success, return to the hot path code, at a point where it will perform the store to dest for us.
1552     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1553 
1554     // Track the stub we have created so that it will be deleted later.
1555     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1556     stubInfo->stubRoutine = entryLabel;
1557 
1558     // Finally patch the jump to slow case back in the hot path to jump here instead.
1559     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1560     RepatchBuffer repatchBuffer(m_codeBlock);
1561     repatchBuffer.relink(jumpLocation, entryLabel);
1562 
1563     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1564     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1565 }
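// Note on the PLATFORM(X86_64) special case used in the prototype checks: the expected
// Structure* is first materialized in regT3 and compared register-vs-memory, since a
// full 64-bit pointer cannot be used directly as an immediate comparison operand there;
// 32-bit targets compare the ImmPtr against the absolute address in one step.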
1566 
1567 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
1568 {
1569     Jump failureCase = checkStructure(regT0, structure);
1570     compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
1571     Jump success = jump();
1572 
1573     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1574 
1575     // Use the patch information to link the failure cases back to the original slow case routine.
1576     CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
1577     if (!lastProtoBegin)
1578         lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
1579 
1580     patchBuffer.link(failureCase, lastProtoBegin);
1581 
1582     // On success, return to the hot path code, at a point where it will perform the store to dest for us.
1583     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1584 
1585     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1586 
1587     structure->ref();
1588     polymorphicStructures->list[currentIndex].set(entryLabel, structure);
1589 
1590     // Finally patch the jump to slow case back in the hot path to jump here instead.
1591     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1592     RepatchBuffer repatchBuffer(m_codeBlock);
1593     repatchBuffer.relink(jumpLocation, entryLabel);
1594 }
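// For polymorphic self caching, each new stub's failure case is linked to the previous
// list entry's stub routine (lastProtoBegin), so successive misses cascade through the
// PolymorphicAccessStructureList; only the very first entry falls back to the original
// slow-case call. The new entry takes a ref on its Structure, is recorded in the list,
// and the hot path's slow-case jump is relinked to point at the newest stub.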
1595 
1596 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
1597 {
1598     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
1599     // referencing the prototype object - let's speculatively load its table nice and early!)
1600     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
1601 
1602     // Check eax is an object of the right Structure.
1603     Jump failureCases1 = checkStructure(regT0, structure);
1604 
1605     // Check that the prototype object's Structure has not changed.
1606     Structure** prototypeStructureAddress = &(protoObject->m_structure);
1607 #if PLATFORM(X86_64)
1608     move(ImmPtr(prototypeStructure), regT3);
1609     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
1610 #else
1611     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
1612 #endif
1613 
1614     // Checks out okay! - getDirectOffset
1615     compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1616 
1617     Jump success = jump();
1618 
1619     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1620 
1621     // Use the patch information to link the failure cases back to the original slow case routine.
1622     CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
1623     patchBuffer.link(failureCases1, lastProtoBegin);
1624     patchBuffer.link(failureCases2, lastProtoBegin);
1625 
1626     // On success, return to the hot path code, at a point where it will perform the store to dest for us.
1627     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1628 
1629     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1630 
1631     structure->ref();
1632     prototypeStructure->ref();
1633     prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
1634 
1635     // Finally patch the jump to slow case back in the hot path to jump here instead.
1636     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1637     RepatchBuffer repatchBuffer(m_codeBlock);
1638     repatchBuffer.relink(jumpLocation, entryLabel);
1639 }
1640 
1641 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
1642 {
1643     ASSERT(count);
1644 
1645     JumpList bucketsOfFail;
1646 
1647     // Check eax is an object of the right Structure.
1648     Jump baseObjectCheck = checkStructure(regT0, structure);
1649     bucketsOfFail.append(baseObjectCheck);
1650 
1651     Structure* currStructure = structure;
1652     RefPtr<Structure>* chainEntries = chain->head();
1653     JSObject* protoObject = 0;
1654     for (unsigned i = 0; i < count; ++i) {
1655         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1656         currStructure = chainEntries[i].get();
1657 
1658         // Check that the prototype object's Structure has not changed.
1659         Structure** prototypeStructureAddress = &(protoObject->m_structure);
1660 #if PLATFORM(X86_64)
1661         move(ImmPtr(currStructure), regT3);
1662         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
1663 #else
1664         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
1665 #endif
1666     }
1667     ASSERT(protoObject);
1668 
1669     compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1670     Jump success = jump();
1671 
1672     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1673 
1674     // Use the patch information to link the failure cases back to the original slow case routine.
1675     CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
1676 
1677     patchBuffer.link(bucketsOfFail, lastProtoBegin);
1678 
1679     // On success, return to the hot path code, at a point where it will perform the store to dest for us.
1680     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1681 
1682     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1683 
1684     // Track the stub we have created so that it will be deleted later.
1685     structure->ref();
1686     chain->ref();
1687     prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
1688 
1689     // Finally patch the jump to slow case back in the hot path to jump here instead.
1690     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1691     RepatchBuffer repatchBuffer(m_codeBlock);
1692     repatchBuffer.relink(jumpLocation, entryLabel);
1693 }
1694 
1695 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
1696 {
1697     ASSERT(count);
1698 
1699     JumpList bucketsOfFail;
1700 
1701     // Check eax is an object of the right Structure.
1702     bucketsOfFail.append(checkStructure(regT0, structure));
1703 
1704     Structure* currStructure = structure;
1705     RefPtr<Structure>* chainEntries = chain->head();
1706     JSObject* protoObject = 0;
1707     for (unsigned i = 0; i < count; ++i) {
1708         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
1709         currStructure = chainEntries[i].get();
1710 
1711         // Check that the prototype object's Structure has not changed.
1712         Structure** prototypeStructureAddress = &(protoObject->m_structure);
1713 #if PLATFORM(X86_64)
1714         move(ImmPtr(currStructure), regT3);
1715         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
1716 #else
1717         bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
1718 #endif
1719     }
1720     ASSERT(protoObject);
1721 
1722     compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
1723     Jump success = jump();
1724 
1725     LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
1726 
1727     // Use the patch information to link the failure cases back to the original slow case routine.
1728     patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
1729 
1730     // On success, return to the hot path code, at a point where it will perform the store to dest for us.
1731     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
1732 
1733     // Track the stub we have created so that it will be deleted later.
1734     CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
1735     stubInfo->stubRoutine = entryLabel;
1736 
1737     // Finally patch the jump to slow case back in the hot path to jump here instead.
1738     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
1739     RepatchBuffer repatchBuffer(m_codeBlock);
1740     repatchBuffer.relink(jumpLocation, entryLabel);
1741 
1742     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
1743     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
1744 }
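// privateCompileGetByIdChain differs from the ...ChainList variant above mainly in how the
// stub is registered: the single-stub version stores its entry label in stubInfo->stubRoutine,
// links all failures back to the original slow case, and relinks the caller to
// cti_op_get_by_id_proto_list, whereas the list variants append to a
// PolymorphicAccessStructureList entry and chain their failure paths to the previous stub.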
1745 
1746 /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
1747 
1748 #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
1749 
1750 #endif // USE(JSVALUE32_64)
1751 
1752 } // namespace JSC
1753 
1754 #endif // ENABLE(JIT)
1755