1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/mips/codegen-mips.h"
6 
7 #if V8_TARGET_ARCH_MIPS
8 
9 #include "src/codegen.h"
10 #include "src/macro-assembler.h"
11 #include "src/mips/simulator-mips.h"
12 
13 namespace v8 {
14 namespace internal {
15 
16 
17 #define __ masm.
18 
19 
20 #if defined(USE_SIMULATOR)
21 byte* fast_exp_mips_machine_code = nullptr;
22 double fast_exp_simulator(double x, Isolate* isolate) {
23   return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
24 }
25 #endif
26 
27 
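// Builds a free-standing native stub that computes exp(x) for a double
// argument: a 1 KB executable buffer is allocated, MathExpGenerator::EmitMathExp
// fills it, the instruction cache is flushed and the buffer is write-protected.
// Under USE_SIMULATOR the raw machine code cannot be called directly, so it is
// dispatched through Simulator::CallFP via fast_exp_simulator() above.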
28 UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
29   size_t actual_size;
30   byte* buffer =
31       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
32   if (buffer == nullptr) return nullptr;
33   ExternalReference::InitializeMathExpData();
34 
35   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
36                       CodeObjectRequired::kNo);
37 
38   {
39     DoubleRegister input = f12;
40     DoubleRegister result = f0;
41     DoubleRegister double_scratch1 = f4;
42     DoubleRegister double_scratch2 = f6;
43     Register temp1 = t0;
44     Register temp2 = t1;
45     Register temp3 = t2;
46 
47     __ MovFromFloatParameter(input);
48     __ Push(temp3, temp2, temp1);
49     MathExpGenerator::EmitMathExp(
50         &masm, input, result, double_scratch1, double_scratch2,
51         temp1, temp2, temp3);
52     __ Pop(temp3, temp2, temp1);
53     __ MovToFloatResult(result);
54     __ Ret();
55   }
56 
57   CodeDesc desc;
58   masm.GetCode(&desc);
59   DCHECK(!RelocInfo::RequiresRelocation(desc));
60 
61   Assembler::FlushICache(isolate, buffer, actual_size);
62   base::OS::ProtectCode(buffer, actual_size);
63 
64 #if !defined(USE_SIMULATOR)
65   return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
66 #else
67   fast_exp_mips_machine_code = buffer;
68   return &fast_exp_simulator;
69 #endif
70 }
71 
72 
73 #if defined(V8_HOST_ARCH_MIPS)
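// Builds an optimized byte-copy routine (dest in a0, src in a1, byte count in
// a2, dest returned in v0). Simulator builds and MIPS32 r6/rX targets keep the
// C++ `stub` fallback. The emitted code follows roughly this shape, given here
// as an illustrative C-like sketch rather than the exact instruction sequence:
//
//   if (count < 8) goto byte_copy;
//   if ((src ^ dst) & 3) goto unaligned_path;   // different alignments
//   align dst/src with one partial word, then copy 64-byte blocks with
//   prefetching, then a 32-byte block, then single words, then bytes;
//   the unaligned path does the same but uses lwl/lwr partial loads.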
74 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
75                                                 MemCopyUint8Function stub) {
76 #if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
77     defined(_MIPS_ARCH_MIPS32RX)
78   return stub;
79 #else
80   size_t actual_size;
81   byte* buffer =
82       static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
83   if (buffer == nullptr) return stub;
84 
85   // This code assumes that cache lines are 32 bytes; if the cache line is
86   // larger it will not work correctly.
87   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
88                       CodeObjectRequired::kNo);
89 
90   {
91     Label lastb, unaligned, aligned, chkw,
92           loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
93           leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
94           ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
95 
96     // The size of each prefetch.
97     uint32_t pref_chunk = 32;
98     // The maximum size of a prefetch; it must not be less than pref_chunk.
99     // If the real size of a prefetch is greater than max_pref_size and
100     // the kPrefHintPrepareForStore hint is used, the code will not work
101     // correctly.
102     uint32_t max_pref_size = 128;
103     DCHECK(pref_chunk < max_pref_size);
104 
105     // pref_limit is set based on the fact that we never use an offset
106     // greater than 5 on a store pref and that a single pref can
107     // never be larger than max_pref_size.
108     uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
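    // With the values above this is (5 * 32) + 128 = 288 bytes.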
109     int32_t pref_hint_load = kPrefHintLoadStreamed;
110     int32_t pref_hint_store = kPrefHintPrepareForStore;
111     uint32_t loadstore_chunk = 4;
112 
113     // The initial prefetches may fetch bytes that are before the buffer being
114     // copied. Start copies with an offset of 4 to avoid this situation when
115     // using kPrefHintPrepareForStore.
116     DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
117            pref_chunk * 4 >= max_pref_size);
118 
119     // If the size is less than 8, go to lastb. Regardless of size,
120     // copy the dst pointer to v0 for the return value.
121     __ slti(t2, a2, 2 * loadstore_chunk);
122     __ bne(t2, zero_reg, &lastb);
123     __ mov(v0, a0);  // In delay slot.
124 
125     // If src and dst have different alignments, go to unaligned; if they
126     // have the same alignment (but are not actually aligned) do a partial
127     // load/store to make them aligned. If they are both already aligned
128     // we can start copying at aligned.
129     __ xor_(t8, a1, a0);
130     __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
131     __ bne(t8, zero_reg, &unaligned);
132     __ subu(a3, zero_reg, a0);  // In delay slot.
133 
134     __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
135     __ beq(a3, zero_reg, &aligned);  // Already aligned.
136     __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
137 
138     if (kArchEndian == kLittle) {
139       __ lwr(t8, MemOperand(a1));
140       __ addu(a1, a1, a3);
141       __ swr(t8, MemOperand(a0));
142       __ addu(a0, a0, a3);
143     } else {
144       __ lwl(t8, MemOperand(a1));
145       __ addu(a1, a1, a3);
146       __ swl(t8, MemOperand(a0));
147       __ addu(a0, a0, a3);
148     }
149     // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
150     // count how many bytes we have to copy after all the 64 byte chunks are
151     // copied and a3 to the dst pointer after all the 64 byte chunks have been
152     // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
153     __ bind(&aligned);
154     __ andi(t8, a2, 0x3f);
155     __ beq(a2, t8, &chkw);  // Less than 64?
156     __ subu(a3, a2, t8);  // In delay slot.
157     __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.
158 
159     // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
160     // in this case a0+x should be past the "t0-32" address. This means:
161     // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
162     // x=64 the last "safe" a0 address is "t0-96". In the current version we
163     // will use "pref hint, 128(a0)", so "t0-160" is the limit.
164     if (pref_hint_store == kPrefHintPrepareForStore) {
165       __ addu(t0, a0, a2);  // t0 is the "past the end" address.
166       __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
167     }
168 
169     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
170     __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
171     __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
172     __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
173 
174     if (pref_hint_store != kPrefHintPrepareForStore) {
175       __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
176       __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
177       __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
178     }
179     __ bind(&loop16w);
180     __ lw(t0, MemOperand(a1));
181 
182     if (pref_hint_store == kPrefHintPrepareForStore) {
183       __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
184       __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
185     }
186     __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.
187 
188     __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
189     __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
190 
191     __ bind(&skip_pref);
192     __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
193     __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
194     __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
195     __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
196     __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
197     __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
198     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
199 
200     __ sw(t0, MemOperand(a0));
201     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
202     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
203     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
204     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
205     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
206     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
207     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
208 
209     __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
210     __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
211     __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
212     __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
213     __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
214     __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
215     __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
216     __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
217     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
218 
219     __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
220     __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
221     __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
222     __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
223     __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
224     __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
225     __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
226     __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
227     __ addiu(a0, a0, 16 * loadstore_chunk);
228     __ bne(a0, a3, &loop16w);
229     __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
230     __ mov(a2, t8);
231 
232     // Here we have src and dest word-aligned but less than 64 bytes to go.
233     // Check for a 32 bytes chunk and copy if there is one. Otherwise jump
234     // down to chk1w to handle the tail end of the copy.
235     __ bind(&chkw);
236     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
237     __ andi(t8, a2, 0x1f);
238     __ beq(a2, t8, &chk1w);  // Less than 32?
239     __ nop();  // In delay slot.
240     __ lw(t0, MemOperand(a1));
241     __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
242     __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
243     __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
244     __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
245     __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
246     __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
247     __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
248     __ addiu(a1, a1, 8 * loadstore_chunk);
249     __ sw(t0, MemOperand(a0));
250     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
251     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
252     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
253     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
254     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
255     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
256     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
257     __ addiu(a0, a0, 8 * loadstore_chunk);
258 
259     // Here we have less than 32 bytes to copy. Set up for a loop to copy
260     // one word at a time. Set a2 to count how many bytes we have to copy
261     // after all the word chunks are copied and a3 to the dst pointer after
262     // all the word chunks have been copied. We will loop, incrementing a0
263     // and a1 until a0 equals a3.
264     __ bind(&chk1w);
265     __ andi(a2, t8, loadstore_chunk - 1);
266     __ beq(a2, t8, &lastb);
267     __ subu(a3, t8, a2);  // In delay slot.
268     __ addu(a3, a0, a3);
269 
270     __ bind(&wordCopy_loop);
271     __ lw(t3, MemOperand(a1));
272     __ addiu(a0, a0, loadstore_chunk);
273     __ addiu(a1, a1, loadstore_chunk);
274     __ bne(a0, a3, &wordCopy_loop);
275     __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
276 
277     __ bind(&lastb);
278     __ Branch(&leave, le, a2, Operand(zero_reg));
279     __ addu(a3, a0, a2);
280 
281     __ bind(&lastbloop);
282     __ lb(v1, MemOperand(a1));
283     __ addiu(a0, a0, 1);
284     __ addiu(a1, a1, 1);
285     __ bne(a0, a3, &lastbloop);
286     __ sb(v1, MemOperand(a0, -1));  // In delay slot.
287 
288     __ bind(&leave);
289     __ jr(ra);
290     __ nop();
291 
292     // Unaligned case. Only the dst gets aligned so we need to do partial
293     // loads of the source followed by normal stores to the dst (once we
294     // have aligned the destination).
295     __ bind(&unaligned);
296     __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
297     __ beq(a3, zero_reg, &ua_chk16w);
298     __ subu(a2, a2, a3);  // In delay slot.
299 
300     if (kArchEndian == kLittle) {
301       __ lwr(v1, MemOperand(a1));
302       __ lwl(v1,
303              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
304       __ addu(a1, a1, a3);
305       __ swr(v1, MemOperand(a0));
306       __ addu(a0, a0, a3);
307     } else {
308       __ lwl(v1, MemOperand(a1));
309       __ lwr(v1,
310              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
311       __ addu(a1, a1, a3);
312       __ swl(v1, MemOperand(a0));
313       __ addu(a0, a0, a3);
314     }
315 
316     // Now the dst (but not the source) is aligned. Set a2 to count how many
317     // bytes we have to copy after all the 64 byte chunks are copied and a3 to
318     // the dst pointer after all the 64 byte chunks have been copied. We will
319     // loop, incrementing a0 and a1 until a0 equals a3.
320     __ bind(&ua_chk16w);
321     __ andi(t8, a2, 0x3f);
322     __ beq(a2, t8, &ua_chkw);
323     __ subu(a3, a2, t8);  // In delay slot.
324     __ addu(a3, a0, a3);
325 
326     if (pref_hint_store == kPrefHintPrepareForStore) {
327       __ addu(t0, a0, a2);
328       __ Subu(t9, t0, pref_limit);
329     }
330 
331     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
332     __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
333     __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
334 
335     if (pref_hint_store != kPrefHintPrepareForStore) {
336       __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
337       __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
338       __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
339     }
340 
341     __ bind(&ua_loop16w);
342     __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
343     if (kArchEndian == kLittle) {
344       __ lwr(t0, MemOperand(a1));
345       __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
346       __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
347 
348       if (pref_hint_store == kPrefHintPrepareForStore) {
349         __ sltu(v1, t9, a0);
350         __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
351       }
352       __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
353 
354       __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
355       __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
356 
357       __ bind(&ua_skip_pref);
358       __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
359       __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
360       __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
361       __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
362       __ lwl(t0,
363              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
364       __ lwl(t1,
365              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
366       __ lwl(t2,
367              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
368       __ lwl(t3,
369              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
370       __ lwl(t4,
371              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
372       __ lwl(t5,
373              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
374       __ lwl(t6,
375              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
376       __ lwl(t7,
377              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
378     } else {
379       __ lwl(t0, MemOperand(a1));
380       __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
381       __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
382 
383       if (pref_hint_store == kPrefHintPrepareForStore) {
384         __ sltu(v1, t9, a0);
385         __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
386       }
387       __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
388 
389       __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
390       __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
391 
392       __ bind(&ua_skip_pref);
393       __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
394       __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
395       __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
396       __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
397       __ lwr(t0,
398              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
399       __ lwr(t1,
400              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
401       __ lwr(t2,
402              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
403       __ lwr(t3,
404              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
405       __ lwr(t4,
406              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
407       __ lwr(t5,
408              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
409       __ lwr(t6,
410              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
411       __ lwr(t7,
412              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
413     }
414     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
415     __ sw(t0, MemOperand(a0));
416     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
417     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
418     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
419     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
420     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
421     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
422     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
423     if (kArchEndian == kLittle) {
424       __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
425       __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
426       __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
427       __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
428       __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
429       __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
430       __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
431       __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
432       __ lwl(t0,
433              MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
434       __ lwl(t1,
435              MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
436       __ lwl(t2,
437              MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
438       __ lwl(t3,
439              MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
440       __ lwl(t4,
441              MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
442       __ lwl(t5,
443              MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
444       __ lwl(t6,
445              MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
446       __ lwl(t7,
447              MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
448     } else {
449       __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
450       __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
451       __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
452       __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
453       __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
454       __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
455       __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
456       __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
457       __ lwr(t0,
458              MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
459       __ lwr(t1,
460              MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
461       __ lwr(t2,
462              MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
463       __ lwr(t3,
464              MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
465       __ lwr(t4,
466              MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
467       __ lwr(t5,
468              MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
469       __ lwr(t6,
470              MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
471       __ lwr(t7,
472              MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
473     }
474     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
475     __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
476     __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
477     __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
478     __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
479     __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
480     __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
481     __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
482     __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
483     __ addiu(a0, a0, 16 * loadstore_chunk);
484     __ bne(a0, a3, &ua_loop16w);
485     __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
486     __ mov(a2, t8);
487 
488     // Here we have less than 64 bytes. Check for
489     // a 32 byte chunk and copy if there is one. Otherwise jump down to
490     // ua_chk1w to handle the tail end of the copy.
491     __ bind(&ua_chkw);
492     __ Pref(pref_hint_load, MemOperand(a1));
493     __ andi(t8, a2, 0x1f);
494 
495     __ beq(a2, t8, &ua_chk1w);
496     __ nop();  // In delay slot.
497     if (kArchEndian == kLittle) {
498       __ lwr(t0, MemOperand(a1));
499       __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
500       __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
501       __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
502       __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
503       __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
504       __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
505       __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
506       __ lwl(t0,
507              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
508       __ lwl(t1,
509              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
510       __ lwl(t2,
511              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
512       __ lwl(t3,
513              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
514       __ lwl(t4,
515              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
516       __ lwl(t5,
517              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
518       __ lwl(t6,
519              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
520       __ lwl(t7,
521              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
522     } else {
523       __ lwl(t0, MemOperand(a1));
524       __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
525       __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
526       __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
527       __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
528       __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
529       __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
530       __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
531       __ lwr(t0,
532              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
533       __ lwr(t1,
534              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
535       __ lwr(t2,
536              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
537       __ lwr(t3,
538              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
539       __ lwr(t4,
540              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
541       __ lwr(t5,
542              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
543       __ lwr(t6,
544              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
545       __ lwr(t7,
546              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
547     }
548     __ addiu(a1, a1, 8 * loadstore_chunk);
549     __ sw(t0, MemOperand(a0));
550     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
551     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
552     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
553     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
554     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
555     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
556     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
557     __ addiu(a0, a0, 8 * loadstore_chunk);
558 
559     // Less than 32 bytes to copy. Set up for a loop to
560     // copy one word at a time.
561     __ bind(&ua_chk1w);
562     __ andi(a2, t8, loadstore_chunk - 1);
563     __ beq(a2, t8, &ua_smallCopy);
564     __ subu(a3, t8, a2);  // In delay slot.
565     __ addu(a3, a0, a3);
566 
567     __ bind(&ua_wordCopy_loop);
568     if (kArchEndian == kLittle) {
569       __ lwr(v1, MemOperand(a1));
570       __ lwl(v1,
571              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
572     } else {
573       __ lwl(v1, MemOperand(a1));
574       __ lwr(v1,
575              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
576     }
577     __ addiu(a0, a0, loadstore_chunk);
578     __ addiu(a1, a1, loadstore_chunk);
579     __ bne(a0, a3, &ua_wordCopy_loop);
580     __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
581 
582     // Copy the remaining tail bytes one at a time.
583     __ bind(&ua_smallCopy);
584     __ beq(a2, zero_reg, &leave);
585     __ addu(a3, a0, a2);  // In delay slot.
586 
587     __ bind(&ua_smallCopy_loop);
588     __ lb(v1, MemOperand(a1));
589     __ addiu(a0, a0, 1);
590     __ addiu(a1, a1, 1);
591     __ bne(a0, a3, &ua_smallCopy_loop);
592     __ sb(v1, MemOperand(a0, -1));  // In delay slot.
593 
594     __ jr(ra);
595     __ nop();
596   }
597   CodeDesc desc;
598   masm.GetCode(&desc);
599   DCHECK(!RelocInfo::RequiresRelocation(desc));
600 
601   Assembler::FlushICache(isolate, buffer, actual_size);
602   base::OS::ProtectCode(buffer, actual_size);
603   return FUNCTION_CAST<MemCopyUint8Function>(buffer);
604 #endif
605 }
606 #endif
607 
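// Builds a stub that computes sqrt(x) for a double argument with the hardware
// sqrt.d instruction. Under the simulator no stub is generated (nullptr is
// returned).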
608 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
609 #if defined(USE_SIMULATOR)
610   return nullptr;
611 #else
612   size_t actual_size;
613   byte* buffer =
614       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
615   if (buffer == nullptr) return nullptr;
616 
617   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
618                       CodeObjectRequired::kNo);
619 
620   __ MovFromFloatParameter(f12);
621   __ sqrt_d(f0, f12);
622   __ MovToFloatResult(f0);
623   __ Ret();
624 
625   CodeDesc desc;
626   masm.GetCode(&desc);
627   DCHECK(!RelocInfo::RequiresRelocation(desc));
628 
629   Assembler::FlushICache(isolate, buffer, actual_size);
630   base::OS::ProtectCode(buffer, actual_size);
631   return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
632 #endif
633 }
634 
635 #undef __
636 
637 
638 // -------------------------------------------------------------------------
639 // Platform-specific RuntimeCallHelper functions.
640 
641 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
642   masm->EnterFrame(StackFrame::INTERNAL);
643   DCHECK(!masm->has_frame());
644   masm->set_has_frame(true);
645 }
646 
647 
648 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
649   masm->LeaveFrame(StackFrame::INTERNAL);
650   DCHECK(masm->has_frame());
651   masm->set_has_frame(false);
652 }
653 
654 
655 // -------------------------------------------------------------------------
656 // Code generators
657 
658 #define __ ACCESS_MASM(masm)
659 
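// Handles an elements-kind transition that only needs a map change (the
// backing store layout stays the same): optionally bail out through
// allocation_memento_found, then store the new map and record the write.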
660 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
661     MacroAssembler* masm,
662     Register receiver,
663     Register key,
664     Register value,
665     Register target_map,
666     AllocationSiteMode mode,
667     Label* allocation_memento_found) {
668   Register scratch_elements = t0;
669   DCHECK(!AreAliased(receiver, key, value, target_map,
670                      scratch_elements));
671 
672   if (mode == TRACK_ALLOCATION_SITE) {
673     DCHECK(allocation_memento_found != NULL);
674     __ JumpIfJSArrayHasAllocationMemento(
675         receiver, scratch_elements, allocation_memento_found);
676   }
677 
678   // Set transitioned map.
679   __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
680   __ RecordWriteField(receiver,
681                       HeapObject::kMapOffset,
682                       target_map,
683                       t5,
684                       kRAHasNotBeenSaved,
685                       kDontSaveFPRegs,
686                       EMIT_REMEMBERED_SET,
687                       OMIT_SMI_CHECK);
688 }
689 
690 
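// Converts a FixedArray of smis into a freshly allocated FixedDoubleArray,
// turning each smi into a double and writing the hole NaN for holes. Bails
// out to `fail` when a GC is required or an allocation memento is present.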
691 void ElementsTransitionGenerator::GenerateSmiToDouble(
692     MacroAssembler* masm,
693     Register receiver,
694     Register key,
695     Register value,
696     Register target_map,
697     AllocationSiteMode mode,
698     Label* fail) {
699   // Register ra contains the return address.
700   Label loop, entry, convert_hole, gc_required, only_change_map, done;
701   Register elements = t0;
702   Register length = t1;
703   Register array = t2;
704   Register array_end = array;
705 
706   // target_map parameter can be clobbered.
707   Register scratch1 = target_map;
708   Register scratch2 = t5;
709   Register scratch3 = t3;
710 
711   // Verify input registers don't conflict with locals.
712   DCHECK(!AreAliased(receiver, key, value, target_map,
713                      elements, length, array, scratch2));
714 
715   Register scratch = t6;
716 
717   if (mode == TRACK_ALLOCATION_SITE) {
718     __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
719   }
720 
721   // Check for empty arrays, which only require a map transition and no changes
722   // to the backing store.
723   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
724   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
725   __ Branch(&only_change_map, eq, at, Operand(elements));
726 
727   __ push(ra);
728   __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
729   // elements: source FixedArray
730   // length: number of elements (smi-tagged)
731 
732   // Allocate new FixedDoubleArray.
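  // length is a smi (value << 1), so shifting it left by two more bits gives
  // the element count times kDoubleSize.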
733   __ sll(scratch, length, 2);
734   __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
735   __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
736   // array: destination FixedDoubleArray, not tagged as heap object
737 
738   // Set destination FixedDoubleArray's length and map.
739   __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
740   __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
741   __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset));
742 
743   // Update receiver's map.
744   __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
745   __ RecordWriteField(receiver,
746                       HeapObject::kMapOffset,
747                       target_map,
748                       scratch2,
749                       kRAHasBeenSaved,
750                       kDontSaveFPRegs,
751                       OMIT_REMEMBERED_SET,
752                       OMIT_SMI_CHECK);
753   // Replace receiver's backing store with newly created FixedDoubleArray.
754   __ Addu(scratch1, array, Operand(kHeapObjectTag));
755   __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
756   __ RecordWriteField(receiver,
757                       JSObject::kElementsOffset,
758                       scratch1,
759                       scratch2,
760                       kRAHasBeenSaved,
761                       kDontSaveFPRegs,
762                       EMIT_REMEMBERED_SET,
763                       OMIT_SMI_CHECK);
764 
765 
766   // Prepare for conversion loop.
767   __ Addu(scratch1, elements,
768       Operand(FixedArray::kHeaderSize - kHeapObjectTag));
769   __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
770   __ sll(at, length, 2);
771   __ Addu(array_end, scratch3, at);
772 
773   // Repurpose registers no longer in use.
774   Register hole_lower = elements;
775   Register hole_upper = length;
776   __ li(hole_lower, Operand(kHoleNanLower32));
777   __ li(hole_upper, Operand(kHoleNanUpper32));
778 
779   // scratch1: begin of source FixedArray element fields, not tagged
780   // hole_lower: kHoleNanLower32
781   // hole_upper: kHoleNanUpper32
782   // array_end: end of destination FixedDoubleArray, not tagged
783   // scratch3: begin of FixedDoubleArray element fields, not tagged
784 
785   __ Branch(&entry);
786 
787   __ bind(&only_change_map);
788   __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
789   __ RecordWriteField(receiver,
790                       HeapObject::kMapOffset,
791                       target_map,
792                       scratch2,
793                       kRAHasBeenSaved,
794                       kDontSaveFPRegs,
795                       OMIT_REMEMBERED_SET,
796                       OMIT_SMI_CHECK);
797   __ Branch(&done);
798 
799   // Call into runtime if GC is required.
800   __ bind(&gc_required);
801   __ lw(ra, MemOperand(sp, 0));
802   __ Branch(USE_DELAY_SLOT, fail);
803   __ addiu(sp, sp, kPointerSize);  // In delay slot.
804 
805   // Convert and copy elements.
806   __ bind(&loop);
807   __ lw(scratch2, MemOperand(scratch1));
808   __ Addu(scratch1, scratch1, kIntSize);
809   // scratch2: current element
810   __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);
811 
812   // Normal smi, convert to double and store.
813   __ mtc1(scratch2, f0);
814   __ cvt_d_w(f0, f0);
815   __ sdc1(f0, MemOperand(scratch3));
816   __ Branch(USE_DELAY_SLOT, &entry);
817   __ addiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
818 
819   // Hole found, store the-hole NaN.
820   __ bind(&convert_hole);
821   if (FLAG_debug_code) {
822     // Restore a "smi-untagged" heap object.
823     __ SmiTag(scratch2);
824     __ Or(scratch2, scratch2, Operand(1));
825     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
826     __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
827   }
828   // mantissa
829   __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
830   // exponent
831   __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
832   __ addiu(scratch3, scratch3, kDoubleSize);
833 
834   __ bind(&entry);
835   __ Branch(&loop, lt, scratch3, Operand(array_end));
836 
837   __ bind(&done);
838   __ pop(ra);
839 }
840 
841 
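// The reverse transition: converts a FixedDoubleArray into a FixedArray of
// heap numbers (and the-hole values), allocating one HeapNumber per non-hole
// element. Bails out to `fail` when a GC is required or an allocation memento
// is present.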
842 void ElementsTransitionGenerator::GenerateDoubleToObject(
843     MacroAssembler* masm,
844     Register receiver,
845     Register key,
846     Register value,
847     Register target_map,
848     AllocationSiteMode mode,
849     Label* fail) {
850   // Register ra contains the return address.
851   Label entry, loop, convert_hole, gc_required, only_change_map;
852   Register elements = t0;
853   Register array = t2;
854   Register length = t1;
855   Register scratch = t5;
856 
857   // Verify input registers don't conflict with locals.
858   DCHECK(!AreAliased(receiver, key, value, target_map,
859                      elements, array, length, scratch));
860 
861   if (mode == TRACK_ALLOCATION_SITE) {
862     __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
863   }
864 
865   // Check for empty arrays, which only require a map transition and no changes
866   // to the backing store.
867   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
868   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
869   __ Branch(&only_change_map, eq, at, Operand(elements));
870 
871   __ MultiPush(
872       value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
873 
874   __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
875   // elements: source FixedArray
876   // length: number of elements (smi-tagged)
877 
878   // Allocate new FixedArray.
879   // Re-use value and target_map registers, as they have been saved on the
880   // stack.
881   Register array_size = value;
882   Register allocate_scratch = target_map;
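  // length is a smi (value << 1), so shifting it left by one more bit gives
  // the element count times kPointerSize.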
883   __ sll(array_size, length, 1);
884   __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
885   __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
886               NO_ALLOCATION_FLAGS);
887   // array: destination FixedArray, not tagged as heap object
888   // Set destination FixedArray's length and map.
889   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
890   __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
891   __ sw(scratch, MemOperand(array, HeapObject::kMapOffset));
892 
893   // Prepare for conversion loop.
894   Register src_elements = elements;
895   Register dst_elements = target_map;
896   Register dst_end = length;
897   Register heap_number_map = scratch;
898   __ Addu(src_elements, src_elements, Operand(
899         FixedDoubleArray::kHeaderSize - kHeapObjectTag
900         + Register::kExponentOffset));
901   __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
902   __ sll(dst_end, dst_end, 1);
903   __ Addu(dst_end, dst_elements, dst_end);
904 
905   // Allocating heap numbers in the loop below can fail and cause a jump to
906   // gc_required. We can't leave a partly initialized FixedArray behind,
907   // so pessimistically fill it with holes now.
908   Label initialization_loop, initialization_loop_entry;
909   __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
910   __ Branch(&initialization_loop_entry);
911   __ bind(&initialization_loop);
912   __ sw(scratch, MemOperand(dst_elements));
913   __ Addu(dst_elements, dst_elements, Operand(kPointerSize));
914   __ bind(&initialization_loop_entry);
915   __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
916 
917   __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
918   __ Addu(array, array, Operand(kHeapObjectTag));
919   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
920   // Using offset addresses.
921   // dst_elements: begin of destination FixedArray element fields, not tagged
922   // src_elements: begin of source FixedDoubleArray element fields, not tagged,
923   //               points to the exponent
924   // dst_end: end of destination FixedArray, not tagged
925   // array: destination FixedArray
926   // heap_number_map: heap number map
927   __ Branch(&entry);
928 
929   // Call into runtime if GC is required.
930   __ bind(&gc_required);
931   __ MultiPop(
932       value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
933 
934   __ Branch(fail);
935 
936   __ bind(&loop);
937   Register upper_bits = key;
938   __ lw(upper_bits, MemOperand(src_elements));
939   __ Addu(src_elements, src_elements, kDoubleSize);
940   // upper_bits: current element's upper 32 bits
941   // src_elements: address of next element's upper 32 bits
942   __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
943 
944   // Non-hole double, copy value into a heap number.
945   Register heap_number = receiver;
946   Register scratch2 = value;
947   Register scratch3 = t6;
948   __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
949                         &gc_required);
950   // heap_number: new heap number
951   // Load the mantissa of the current element; src_elements
952   // points to the exponent of the next element.
953   __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
954       - Register::kExponentOffset - kDoubleSize)));
955   __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
956   __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
957   __ mov(scratch2, dst_elements);
958   __ sw(heap_number, MemOperand(dst_elements));
959   __ Addu(dst_elements, dst_elements, kIntSize);
960   __ RecordWrite(array,
961                  scratch2,
962                  heap_number,
963                  kRAHasBeenSaved,
964                  kDontSaveFPRegs,
965                  EMIT_REMEMBERED_SET,
966                  OMIT_SMI_CHECK);
967   __ Branch(&entry);
968 
969   // Replace the-hole NaN with the-hole pointer.
970   __ bind(&convert_hole);
971   __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
972   __ sw(scratch2, MemOperand(dst_elements));
973   __ Addu(dst_elements, dst_elements, kIntSize);
974 
975   __ bind(&entry);
976   __ Branch(&loop, lt, dst_elements, Operand(dst_end));
977 
978   __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
979   // Replace receiver's backing store with newly created and filled FixedArray.
980   __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
981   __ RecordWriteField(receiver,
982                       JSObject::kElementsOffset,
983                       array,
984                       scratch,
985                       kRAHasBeenSaved,
986                       kDontSaveFPRegs,
987                       EMIT_REMEMBERED_SET,
988                       OMIT_SMI_CHECK);
989   __ pop(ra);
990 
991   __ bind(&only_change_map);
992   // Update receiver's map.
993   __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
994   __ RecordWriteField(receiver,
995                       HeapObject::kMapOffset,
996                       target_map,
997                       scratch,
998                       kRAHasNotBeenSaved,
999                       kDontSaveFPRegs,
1000                       OMIT_REMEMBERED_SET,
1001                       OMIT_SMI_CHECK);
1002 }
1003 
1004 
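// Loads the character at `index` of `string` into `result`. Slices and flat
// cons strings are unwrapped to their underlying string first; sequential and
// external strings are then handled for both one-byte and two-byte encodings.
// Short external strings and non-flat cons strings go to `call_runtime`.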
1005 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
1006                                        Register string,
1007                                        Register index,
1008                                        Register result,
1009                                        Label* call_runtime) {
1010   // Fetch the instance type of the receiver into result register.
1011   __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
1012   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
1013 
1014   // We need special handling for indirect strings.
1015   Label check_sequential;
1016   __ And(at, result, Operand(kIsIndirectStringMask));
1017   __ Branch(&check_sequential, eq, at, Operand(zero_reg));
1018 
1019   // Dispatch on the indirect string shape: slice or cons.
1020   Label cons_string;
1021   __ And(at, result, Operand(kSlicedNotConsMask));
1022   __ Branch(&cons_string, eq, at, Operand(zero_reg));
1023 
1024   // Handle slices.
1025   Label indirect_string_loaded;
1026   __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
1027   __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
1028   __ sra(at, result, kSmiTagSize);
1029   __ Addu(index, index, at);
1030   __ jmp(&indirect_string_loaded);
1031 
1032   // Handle cons strings.
1033   // Check whether the right hand side is the empty string (i.e. if
1034   // this is really a flat string in a cons string). If that is not
1035   // the case we would rather go to the runtime system now to flatten
1036   // the string.
1037   __ bind(&cons_string);
1038   __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
1039   __ LoadRoot(at, Heap::kempty_stringRootIndex);
1040   __ Branch(call_runtime, ne, result, Operand(at));
1041   // Get the first of the two strings and load its instance type.
1042   __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
1043 
1044   __ bind(&indirect_string_loaded);
1045   __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
1046   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
1047 
1048   // Distinguish sequential and external strings. Only these two string
1049   // representations can reach here (slices and flat cons strings have been
1050   // reduced to the underlying sequential or external string).
1051   Label external_string, check_encoding;
1052   __ bind(&check_sequential);
1053   STATIC_ASSERT(kSeqStringTag == 0);
1054   __ And(at, result, Operand(kStringRepresentationMask));
1055   __ Branch(&external_string, ne, at, Operand(zero_reg));
1056 
1057   // Prepare sequential strings
1058   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1059   __ Addu(string,
1060           string,
1061           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1062   __ jmp(&check_encoding);
1063 
1064   // Handle external strings.
1065   __ bind(&external_string);
1066   if (FLAG_debug_code) {
1067     // Assert that we do not have a cons or slice (indirect strings) here.
1068     // Sequential strings have already been ruled out.
1069     __ And(at, result, Operand(kIsIndirectStringMask));
1070     __ Assert(eq, kExternalStringExpectedButNotFound,
1071         at, Operand(zero_reg));
1072   }
1073   // Rule out short external strings.
1074   STATIC_ASSERT(kShortExternalStringTag != 0);
1075   __ And(at, result, Operand(kShortExternalStringMask));
1076   __ Branch(call_runtime, ne, at, Operand(zero_reg));
1077   __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
1078 
1079   Label one_byte, done;
1080   __ bind(&check_encoding);
1081   STATIC_ASSERT(kTwoByteStringTag == 0);
1082   __ And(at, result, Operand(kStringEncodingMask));
1083   __ Branch(&one_byte, ne, at, Operand(zero_reg));
1084   // Two-byte string.
1085   __ sll(at, index, 1);
1086   __ Addu(at, string, at);
1087   __ lhu(result, MemOperand(at));
1088   __ jmp(&done);
1089   __ bind(&one_byte);
1090   // One-byte string.
1091   __ Addu(at, string, index);
1092   __ lbu(result, MemOperand(at));
1093   __ bind(&done);
1094 }
1095 
1096 
1097 static MemOperand ExpConstant(int index, Register base) {
1098   return MemOperand(base, index * kDoubleSize);
1099 }
1100 
1101 
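// Emits code that computes exp(input) into `result` using the precomputed
// tables behind ExternalReference::math_exp_constants() and
// math_exp_log_table(). Inputs at or below the low bound produce 0 and inputs
// at or above the high bound produce the stored overflow constant; everything
// in between is approximated with a short multiply/add sequence combined with
// a table lookup keyed on the low 11 bits of the scaled argument.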
1102 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
1103                                    DoubleRegister input,
1104                                    DoubleRegister result,
1105                                    DoubleRegister double_scratch1,
1106                                    DoubleRegister double_scratch2,
1107                                    Register temp1,
1108                                    Register temp2,
1109                                    Register temp3) {
1110   DCHECK(!input.is(result));
1111   DCHECK(!input.is(double_scratch1));
1112   DCHECK(!input.is(double_scratch2));
1113   DCHECK(!result.is(double_scratch1));
1114   DCHECK(!result.is(double_scratch2));
1115   DCHECK(!double_scratch1.is(double_scratch2));
1116   DCHECK(!temp1.is(temp2));
1117   DCHECK(!temp1.is(temp3));
1118   DCHECK(!temp2.is(temp3));
1119   DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
1120   DCHECK(!masm->serializer_enabled());  // External references not serializable.
1121 
1122   Label zero, infinity, done;
1123 
1124   __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
1125 
1126   __ ldc1(double_scratch1, ExpConstant(0, temp3));
1127   __ BranchF(&zero, NULL, ge, double_scratch1, input);
1128 
1129   __ ldc1(double_scratch2, ExpConstant(1, temp3));
1130   __ BranchF(&infinity, NULL, ge, input, double_scratch2);
1131 
1132   __ ldc1(double_scratch1, ExpConstant(3, temp3));
1133   __ ldc1(result, ExpConstant(4, temp3));
1134   __ mul_d(double_scratch1, double_scratch1, input);
1135   __ add_d(double_scratch1, double_scratch1, result);
1136   __ FmoveLow(temp2, double_scratch1);
1137   __ sub_d(double_scratch1, double_scratch1, result);
1138   __ ldc1(result, ExpConstant(6, temp3));
1139   __ ldc1(double_scratch2, ExpConstant(5, temp3));
1140   __ mul_d(double_scratch1, double_scratch1, double_scratch2);
1141   __ sub_d(double_scratch1, double_scratch1, input);
1142   __ sub_d(result, result, double_scratch1);
1143   __ mul_d(double_scratch2, double_scratch1, double_scratch1);
1144   __ mul_d(result, result, double_scratch2);
1145   __ ldc1(double_scratch2, ExpConstant(7, temp3));
1146   __ mul_d(result, result, double_scratch2);
1147   __ sub_d(result, result, double_scratch1);
1148   // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
1149   DCHECK(*reinterpret_cast<double*>
1150          (ExternalReference::math_exp_constants(8).address()) == 1);
1151   __ Move(double_scratch2, 1.);
1152   __ add_d(result, result, double_scratch2);
1153   __ srl(temp1, temp2, 11);
1154   __ Ext(temp2, temp2, 0, 11);
1155   __ Addu(temp1, temp1, Operand(0x3ff));
1156 
1157   // Must not call ExpConstant() after overwriting temp3!
1158   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1159   __ sll(at, temp2, 3);
1160   __ Addu(temp3, temp3, Operand(at));
1161   __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
1162   __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
1163   // The first word loaded is the lower number register.
1164   if (temp2.code() < temp3.code()) {
1165     __ sll(at, temp1, 20);
1166     __ Or(temp1, temp3, at);
1167     __ Move(double_scratch1, temp2, temp1);
1168   } else {
1169     __ sll(at, temp1, 20);
1170     __ Or(temp1, temp2, at);
1171     __ Move(double_scratch1, temp3, temp1);
1172   }
1173   __ mul_d(result, result, double_scratch1);
1174   __ BranchShort(&done);
1175 
1176   __ bind(&zero);
1177   __ Move(result, kDoubleRegZero);
1178   __ BranchShort(&done);
1179 
1180   __ bind(&infinity);
1181   __ ldc1(result, ExpConstant(2, temp3));
1182 
1183   __ bind(&done);
1184 }
1185 
1186 #ifdef DEBUG
1187 // nop(CODE_AGE_MARKER_NOP)
1188 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
1189 #endif
1190 
1191 
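// Pre-builds the "young" code age sequence (Push ra/fp/cp/a1, a marker nop and
// the frame pointer setup) so generated prologues can later be compared
// against it and patched in place.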
1192 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
1193   USE(isolate);
1194   DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
1195   // Since patcher is a large object, allocate it dynamically when needed,
1196   // to avoid overloading the stack in stress conditions.
1197   // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
1198   // the process, before MIPS simulator ICache is setup.
1199   base::SmartPointer<CodePatcher> patcher(
1200       new CodePatcher(isolate, young_sequence_.start(),
1201                       young_sequence_.length() / Assembler::kInstrSize,
1202                       CodePatcher::DONT_FLUSH));
1203   PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
1204   patcher->masm()->Push(ra, fp, cp, a1);
1205   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1206   patcher->masm()->Addu(
1207       fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1208 }
1209 
1210 
1211 #ifdef DEBUG
1212 bool CodeAgingHelper::IsOld(byte* candidate) const {
1213   return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
1214 }
1215 #endif
1216 
1217 
1218 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
1219   bool result = isolate->code_aging_helper()->IsYoung(sequence);
1220   DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
1221   return result;
1222 }
1223 
1224 
1225 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
1226                                MarkingParity* parity) {
1227   if (IsYoungSequence(isolate, sequence)) {
1228     *age = kNoAgeCodeAge;
1229     *parity = NO_MARKING_PARITY;
1230   } else {
1231     Address target_address = Assembler::target_address_at(
1232         sequence + Assembler::kInstrSize);
1233     Code* stub = GetCodeFromTargetAddress(target_address);
1234     GetCodeAgeAndParity(stub, age, parity);
1235   }
1236 }
1237 
1238 
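// Rewrites a code prologue in place: either restores the young sequence
// (age == kNoAgeCodeAge) or patches in a CODE_AGE_MARKER_NOP followed by a
// call through t9 to the matching code age stub.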
1239 void Code::PatchPlatformCodeAge(Isolate* isolate,
1240                                 byte* sequence,
1241                                 Code::Age age,
1242                                 MarkingParity parity) {
1243   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
1244   if (age == kNoAgeCodeAge) {
1245     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
1246     Assembler::FlushICache(isolate, sequence, young_length);
1247   } else {
1248     Code* stub = GetCodeAgeStub(isolate, age, parity);
1249     CodePatcher patcher(isolate, sequence,
1250                         young_length / Assembler::kInstrSize);
1251     // Mark this code sequence for FindPlatformCodeAgeSequence().
1252     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
1253     // Load the stub address into t9 and call it;
1254     // GetCodeAgeAndParity() extracts the stub address from this instruction.
1255     patcher.masm()->li(
1256         t9,
1257         Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
1258         CONSTANT_SIZE);
1259     patcher.masm()->nop();  // Prevent jalr to jal optimization.
1260     patcher.masm()->jalr(t9, a0);
1261     patcher.masm()->nop();  // Branch delay slot nop.
1262     patcher.masm()->nop();  // Pad the empty space.
1263   }
1264 }
1265 
1266 
1267 #undef __
1268 
1269 }  // namespace internal
1270 }  // namespace v8
1271 
1272 #endif  // V8_TARGET_ARCH_MIPS
1273