1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/mips64/codegen-mips64.h"
6 
7 #if V8_TARGET_ARCH_MIPS64
8 
9 #include "src/codegen.h"
10 #include "src/macro-assembler.h"
11 #include "src/mips64/simulator-mips64.h"
12 
13 namespace v8 {
14 namespace internal {
15 
16 
17 #define __ masm.
18 
19 
20 #if defined(USE_SIMULATOR)
21 byte* fast_exp_mips_machine_code = nullptr;
22 double fast_exp_simulator(double x, Isolate* isolate) {
23   return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
24 }
25 #endif
26 
27 
28 UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
29   size_t actual_size;
30   byte* buffer =
31       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
32   if (buffer == nullptr) return nullptr;
33   ExternalReference::InitializeMathExpData();
34 
35   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
36                       CodeObjectRequired::kNo);
37 
38   {
39     DoubleRegister input = f12;
40     DoubleRegister result = f0;
41     DoubleRegister double_scratch1 = f4;
42     DoubleRegister double_scratch2 = f6;
43     Register temp1 = a4;
44     Register temp2 = a5;
45     Register temp3 = a6;
46 
47     __ MovFromFloatParameter(input);
48     __ Push(temp3, temp2, temp1);
49     MathExpGenerator::EmitMathExp(
50         &masm, input, result, double_scratch1, double_scratch2,
51         temp1, temp2, temp3);
52     __ Pop(temp3, temp2, temp1);
53     __ MovToFloatResult(result);
54     __ Ret();
55   }
56 
57   CodeDesc desc;
58   masm.GetCode(&desc);
59   DCHECK(!RelocInfo::RequiresRelocation(desc));
60 
61   Assembler::FlushICache(isolate, buffer, actual_size);
62   base::OS::ProtectCode(buffer, actual_size);
63 
64 #if !defined(USE_SIMULATOR)
65   return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
66 #else
67   fast_exp_mips_machine_code = buffer;
68   return &fast_exp_simulator;
69 #endif
70 }
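// A minimal usage sketch (illustrative only, assuming the usual
// UnaryMathFunctionWithIsolate signature double(double, Isolate*), as seen
// in fast_exp_simulator above):
//
//   UnaryMathFunctionWithIsolate fast_exp = CreateExpFunction(isolate);
//   // nullptr is returned only if the 1 KB code buffer cannot be allocated;
//   // a caller would then fall back to a portable std::exp.
//   double y = (fast_exp != nullptr) ? fast_exp(1.0, isolate) : std::exp(1.0);
//
// Under USE_SIMULATOR the returned pointer is fast_exp_simulator, which
// forwards the call to the generated code through Simulator::CallFP.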
71 
72 
73 #if defined(V8_HOST_ARCH_MIPS)
74 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
75                                                 MemCopyUint8Function stub) {
76 #if defined(USE_SIMULATOR)
77   return stub;
78 #else
79 
80   size_t actual_size;
81   byte* buffer =
82       static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
83   if (buffer == nullptr) return stub;
84 
85   // This code assumes that cache lines are 32 bytes; if the cache line is
86   // larger, the code will not work correctly.
87   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
88                       CodeObjectRequired::kNo);
89 
90   {
91     Label lastb, unaligned, aligned, chkw,
92           loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
93           leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
94           ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
95 
96     // The size of each prefetch.
97     uint32_t pref_chunk = 32;
98     // The maximum size of a prefetch; it must not be less than pref_chunk.
99     // If the real size of a prefetch is greater than max_pref_size and
100     // the kPrefHintPrepareForStore hint is used, the code will not work
101     // correctly.
102     uint32_t max_pref_size = 128;
103     DCHECK(pref_chunk < max_pref_size);
104 
105     // pref_limit is set based on the fact that we never use an offset
106     // greater than 5 on a store pref and that a single pref can
107     // never be larger than max_pref_size.
108     uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
109     int32_t pref_hint_load = kPrefHintLoadStreamed;
110     int32_t pref_hint_store = kPrefHintPrepareForStore;
111     uint32_t loadstore_chunk = 4;
112 
113     // The initial prefetches may fetch bytes that are before the buffer being
114     // copied. Start copies with an offset of 4 to avoid this situation when
115     // using kPrefHintPrepareForStore.
116     DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
117            pref_chunk * 4 >= max_pref_size);
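    // Worked numbers for the defaults above: pref_chunk = 32 and
    // max_pref_size = 128, so pref_limit = (5 * 32) + 128 = 288 bytes. t9,
    // computed below as "past-the-end minus pref_limit", is therefore the
    // last destination address at which a PrepareForStore prefetch is still
    // considered safe.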
118     // If the size is less than 8, go to lastb. Regardless of size,
119     // copy dst pointer to v0 for the return value.
120     __ slti(a6, a2, 2 * loadstore_chunk);
121     __ bne(a6, zero_reg, &lastb);
122     __ mov(v0, a0);  // In delay slot.
123 
124     // If src and dst have different alignments, go to unaligned, if they
125     // have the same alignment (but are not actually aligned) do a partial
126     // load/store to make them aligned. If they are both already aligned
127     // we can start copying at aligned.
128     __ xor_(t8, a1, a0);
129     __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
130     __ bne(t8, zero_reg, &unaligned);
131     __ subu(a3, zero_reg, a0);  // In delay slot.
132 
133     __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
134     __ beq(a3, zero_reg, &aligned);  // Already aligned.
135     __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
136 
137     if (kArchEndian == kLittle) {
138       __ lwr(t8, MemOperand(a1));
139       __ addu(a1, a1, a3);
140       __ swr(t8, MemOperand(a0));
141       __ addu(a0, a0, a3);
142     } else {
143       __ lwl(t8, MemOperand(a1));
144       __ addu(a1, a1, a3);
145       __ swl(t8, MemOperand(a0));
146       __ addu(a0, a0, a3);
147     }
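    // Worked example of the head-alignment copy above: loadstore_chunk is 4,
    // so a3 = (-a0) & 3 is the number of bytes needed to reach the next word
    // boundary. If a0 ends in ...2, a3 = 2 and lwr/swr (little-endian) or
    // lwl/swl (big-endian) move those two bytes in one partial-word access,
    // after which both pointers advance by a3.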
148 
149     // Now dst/src are both aligned to word boundaries. Set a2 to
150     // count how many bytes we have to copy after all the 64 byte chunks are
151     // copied and a3 to the dst pointer after all the 64 byte chunks have been
152     // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
153     __ bind(&aligned);
154     __ andi(t8, a2, 0x3f);
155     __ beq(a2, t8, &chkw);  // Less than 64?
156     __ subu(a3, a2, t8);  // In delay slot.
157     __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.
158 
159     // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
160     // in this case a0+x should be past the "a4-32" address. This means:
161     // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
162     // x=64 the last "safe" a0 address is "a4-96". In the current version we
163     // will use "pref hint, 128(a0)", so "a4-160" is the limit.
164     if (pref_hint_store == kPrefHintPrepareForStore) {
165       __ addu(a4, a0, a2);  // a4 is the "past the end" address.
166       __ Subu(t9, a4, pref_limit);  // t9 is the "last safe pref" address.
167     }
168 
169     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
170     __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
171     __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
172     __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
173 
174     if (pref_hint_store != kPrefHintPrepareForStore) {
175       __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
176       __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
177       __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
178     }
179     __ bind(&loop16w);
180     __ lw(a4, MemOperand(a1));
181 
182     if (pref_hint_store == kPrefHintPrepareForStore) {
183       __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
184       __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
185     }
186     __ lw(a5, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.
187 
188     __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
189     __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
190 
191     __ bind(&skip_pref);
192     __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
193     __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
194     __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
195     __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
196     __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
197     __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
198     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
199 
200     __ sw(a4, MemOperand(a0));
201     __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
202     __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
203     __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
204     __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
205     __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
206     __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
207     __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
208 
209     __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
210     __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
211     __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
212     __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
213     __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
214     __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
215     __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
216     __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
217     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
218 
219     __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
220     __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
221     __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
222     __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
223     __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
224     __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
225     __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
226     __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
227     __ addiu(a0, a0, 16 * loadstore_chunk);
228     __ bne(a0, a3, &loop16w);
229     __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
230     __ mov(a2, t8);
231 
232     // Here we have src and dest word-aligned but less than 64 bytes to go.
233     // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
234     // down to chk1w to handle the tail end of the copy.
235     __ bind(&chkw);
236     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
237     __ andi(t8, a2, 0x1f);
238     __ beq(a2, t8, &chk1w);  // Less than 32?
239     __ nop();  // In delay slot.
240     __ lw(a4, MemOperand(a1));
241     __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
242     __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
243     __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
244     __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
245     __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
246     __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
247     __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
248     __ addiu(a1, a1, 8 * loadstore_chunk);
249     __ sw(a4, MemOperand(a0));
250     __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
251     __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
252     __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
253     __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
254     __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
255     __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
256     __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
257     __ addiu(a0, a0, 8 * loadstore_chunk);
258 
259     // Here we have less than 32 bytes to copy. Set up for a loop to copy
260     // one word at a time. Set a2 to count how many bytes we have to copy
261     // after all the word chunks are copied and a3 to the dst pointer after
262     // all the word chunks have been copied. We will loop, incrementing a0
263     // and a1 until a0 equals a3.
264     __ bind(&chk1w);
265     __ andi(a2, t8, loadstore_chunk - 1);
266     __ beq(a2, t8, &lastb);
267     __ subu(a3, t8, a2);  // In delay slot.
268     __ addu(a3, a0, a3);
269 
270     __ bind(&wordCopy_loop);
271     __ lw(a7, MemOperand(a1));
272     __ addiu(a0, a0, loadstore_chunk);
273     __ addiu(a1, a1, loadstore_chunk);
274     __ bne(a0, a3, &wordCopy_loop);
275     __ sw(a7, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
276 
277     __ bind(&lastb);
278     __ Branch(&leave, le, a2, Operand(zero_reg));
279     __ addu(a3, a0, a2);
280 
281     __ bind(&lastbloop);
282     __ lb(v1, MemOperand(a1));
283     __ addiu(a0, a0, 1);
284     __ addiu(a1, a1, 1);
285     __ bne(a0, a3, &lastbloop);
286     __ sb(v1, MemOperand(a0, -1));  // In delay slot.
287 
288     __ bind(&leave);
289     __ jr(ra);
290     __ nop();
291 
292     // Unaligned case. Only the dst gets aligned so we need to do partial
293     // loads of the source followed by normal stores to the dst (once we
294     // have aligned the destination).
295     __ bind(&unaligned);
296     __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
297     __ beq(a3, zero_reg, &ua_chk16w);
298     __ subu(a2, a2, a3);  // In delay slot.
299 
300     if (kArchEndian == kLittle) {
301       __ lwr(v1, MemOperand(a1));
302       __ lwl(v1,
303              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
304       __ addu(a1, a1, a3);
305       __ swr(v1, MemOperand(a0));
306       __ addu(a0, a0, a3);
307     } else {
308       __ lwl(v1, MemOperand(a1));
309       __ lwr(v1,
310              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
311       __ addu(a1, a1, a3);
312       __ swl(v1, MemOperand(a0));
313       __ addu(a0, a0, a3);
314     }
315 
316     // Now the dst (but not the source) is aligned. Set a2 to count how many
317     // bytes we have to copy after all the 64 byte chunks are copied and a3 to
318     // the dst pointer after all the 64 byte chunks have been copied. We will
319     // loop, incrementing a0 and a1 until a0 equals a3.
320     __ bind(&ua_chk16w);
321     __ andi(t8, a2, 0x3f);
322     __ beq(a2, t8, &ua_chkw);
323     __ subu(a3, a2, t8);  // In delay slot.
324     __ addu(a3, a0, a3);
325 
326     if (pref_hint_store == kPrefHintPrepareForStore) {
327       __ addu(a4, a0, a2);
328       __ Subu(t9, a4, pref_limit);
329     }
330 
331     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
332     __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
333     __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
334 
335     if (pref_hint_store != kPrefHintPrepareForStore) {
336       __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
337       __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
338       __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
339     }
340 
341     __ bind(&ua_loop16w);
342     if (kArchEndian == kLittle) {
343       __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
344       __ lwr(a4, MemOperand(a1));
345       __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
346       __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
347 
348       if (pref_hint_store == kPrefHintPrepareForStore) {
349         __ sltu(v1, t9, a0);
350         __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
351       }
352       __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
353 
354       __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
355       __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
356 
357       __ bind(&ua_skip_pref);
358       __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
359       __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
360       __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
361       __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
362       __ lwl(a4,
363              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
364       __ lwl(a5,
365              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
366       __ lwl(a6,
367              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
368       __ lwl(a7,
369              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
370       __ lwl(t0,
371              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
372       __ lwl(t1,
373              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
374       __ lwl(t2,
375              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
376       __ lwl(t3,
377              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
378     } else {
379       __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
380       __ lwl(a4, MemOperand(a1));
381       __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
382       __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
383 
384       if (pref_hint_store == kPrefHintPrepareForStore) {
385         __ sltu(v1, t9, a0);
386         __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
387       }
388       __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
389 
390       __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
391       __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
392 
393       __ bind(&ua_skip_pref);
394       __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
395       __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
396       __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
397       __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
398       __ lwr(a4,
399              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
400       __ lwr(a5,
401              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
402       __ lwr(a6,
403              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
404       __ lwr(a7,
405              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
406       __ lwr(t0,
407              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
408       __ lwr(t1,
409              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
410       __ lwr(t2,
411              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
412       __ lwr(t3,
413              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
414     }
415     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
416     __ sw(a4, MemOperand(a0));
417     __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
418     __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
419     __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
420     __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
421     __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
422     __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
423     __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
424     if (kArchEndian == kLittle) {
425       __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
426       __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
427       __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
428       __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
429       __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
430       __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
431       __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
432       __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
433       __ lwl(a4,
434              MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
435       __ lwl(a5,
436              MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
437       __ lwl(a6,
438              MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
439       __ lwl(a7,
440              MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
441       __ lwl(t0,
442              MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
443       __ lwl(t1,
444              MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
445       __ lwl(t2,
446              MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
447       __ lwl(t3,
448              MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
449     } else {
450       __ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
451       __ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
452       __ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
453       __ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
454       __ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
455       __ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
456       __ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
457       __ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
458       __ lwr(a4,
459              MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
460       __ lwr(a5,
461              MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
462       __ lwr(a6,
463              MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
464       __ lwr(a7,
465              MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
466       __ lwr(t0,
467              MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
468       __ lwr(t1,
469              MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
470       __ lwr(t2,
471              MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
472       __ lwr(t3,
473              MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
474     }
475     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
476     __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
477     __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
478     __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
479     __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
480     __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
481     __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
482     __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
483     __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
484     __ addiu(a0, a0, 16 * loadstore_chunk);
485     __ bne(a0, a3, &ua_loop16w);
486     __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
487     __ mov(a2, t8);
488 
489     // Here we have less than 64 bytes to go. Check for
490     // a 32-byte chunk and copy it if there is one. Otherwise jump down to
491     // ua_chk1w to handle the tail end of the copy.
492     __ bind(&ua_chkw);
493     __ Pref(pref_hint_load, MemOperand(a1));
494     __ andi(t8, a2, 0x1f);
495 
496     __ beq(a2, t8, &ua_chk1w);
497     __ nop();  // In delay slot.
498     if (kArchEndian == kLittle) {
499       __ lwr(a4, MemOperand(a1));
500       __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
501       __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
502       __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
503       __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
504       __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
505       __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
506       __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
507       __ lwl(a4,
508              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
509       __ lwl(a5,
510              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
511       __ lwl(a6,
512              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
513       __ lwl(a7,
514              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
515       __ lwl(t0,
516              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
517       __ lwl(t1,
518              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
519       __ lwl(t2,
520              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
521       __ lwl(t3,
522              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
523     } else {
524       __ lwl(a4, MemOperand(a1));
525       __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
526       __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
527       __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
528       __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
529       __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
530       __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
531       __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
532       __ lwr(a4,
533              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
534       __ lwr(a5,
535              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
536       __ lwr(a6,
537              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
538       __ lwr(a7,
539              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
540       __ lwr(t0,
541              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
542       __ lwr(t1,
543              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
544       __ lwr(t2,
545              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
546       __ lwr(t3,
547              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
548     }
549     __ addiu(a1, a1, 8 * loadstore_chunk);
550     __ sw(a4, MemOperand(a0));
551     __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
552     __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
553     __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
554     __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
555     __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
556     __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
557     __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
558     __ addiu(a0, a0, 8 * loadstore_chunk);
559 
560     // Less than 32 bytes to copy. Set up for a loop to
561     // copy one word at a time.
562     __ bind(&ua_chk1w);
563     __ andi(a2, t8, loadstore_chunk - 1);
564     __ beq(a2, t8, &ua_smallCopy);
565     __ subu(a3, t8, a2);  // In delay slot.
566     __ addu(a3, a0, a3);
567 
568     __ bind(&ua_wordCopy_loop);
569     if (kArchEndian == kLittle) {
570       __ lwr(v1, MemOperand(a1));
571       __ lwl(v1,
572              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
573     } else {
574       __ lwl(v1, MemOperand(a1));
575       __ lwr(v1,
576              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
577     }
578     __ addiu(a0, a0, loadstore_chunk);
579     __ addiu(a1, a1, loadstore_chunk);
580     __ bne(a0, a3, &ua_wordCopy_loop);
581     __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
582 
583     // Copy the remaining tail bytes one at a time.
584     __ bind(&ua_smallCopy);
585     __ beq(a2, zero_reg, &leave);
586     __ addu(a3, a0, a2);  // In delay slot.
587 
588     __ bind(&ua_smallCopy_loop);
589     __ lb(v1, MemOperand(a1));
590     __ addiu(a0, a0, 1);
591     __ addiu(a1, a1, 1);
592     __ bne(a0, a3, &ua_smallCopy_loop);
593     __ sb(v1, MemOperand(a0, -1));  // In delay slot.
594 
595     __ jr(ra);
596     __ nop();
597   }
598   CodeDesc desc;
599   masm.GetCode(&desc);
600   DCHECK(!RelocInfo::RequiresRelocation(desc));
601 
602   Assembler::FlushICache(isolate, buffer, actual_size);
603   base::OS::ProtectCode(buffer, actual_size);
604   return FUNCTION_CAST<MemCopyUint8Function>(buffer);
605 #endif
606 }
607 #endif
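// A minimal usage sketch, assuming V8's MemCopyUint8Function signature
// void(uint8_t* dest, const uint8_t* src, size_t size); dest, src and size
// arrive in a0, a1 and a2 respectively, matching the register usage in the
// stub above:
//
//   // CreateMemCopyUint8Function returns "stub" unchanged under the
//   // simulator or when the 3 KB code buffer cannot be allocated.
//   MemCopyUint8Function copy = CreateMemCopyUint8Function(isolate, stub);
//   copy(dest, src, size);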
608 
609 UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
610 #if defined(USE_SIMULATOR)
611   return nullptr;
612 #else
613   size_t actual_size;
614   byte* buffer =
615       static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
616   if (buffer == nullptr) return nullptr;
617 
618   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
619                       CodeObjectRequired::kNo);
620 
621   __ MovFromFloatParameter(f12);
622   __ sqrt_d(f0, f12);
623   __ MovToFloatResult(f0);
624   __ Ret();
625 
626   CodeDesc desc;
627   masm.GetCode(&desc);
628   DCHECK(!RelocInfo::RequiresRelocation(desc));
629 
630   Assembler::FlushICache(isolate, buffer, actual_size);
631   base::OS::ProtectCode(buffer, actual_size);
632   return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
633 #endif
634 }
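// Usage note: unlike the exp stub, this returns nullptr under the simulator,
// so a caller would typically keep a portable fallback, e.g. (illustrative
// sketch only):
//
//   UnaryMathFunctionWithIsolate fast_sqrt = CreateSqrtFunction(isolate);
//   double r = (fast_sqrt != nullptr) ? fast_sqrt(2.0, isolate)
//                                     : std::sqrt(2.0);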
635 
636 #undef __
637 
638 
639 // -------------------------------------------------------------------------
640 // Platform-specific RuntimeCallHelper functions.
641 
642 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
643   masm->EnterFrame(StackFrame::INTERNAL);
644   DCHECK(!masm->has_frame());
645   masm->set_has_frame(true);
646 }
647 
648 
649 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
650   masm->LeaveFrame(StackFrame::INTERNAL);
651   DCHECK(masm->has_frame());
652   masm->set_has_frame(false);
653 }
654 
655 
656 // -------------------------------------------------------------------------
657 // Code generators
658 
659 #define __ ACCESS_MASM(masm)
660 
661 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
662     MacroAssembler* masm,
663     Register receiver,
664     Register key,
665     Register value,
666     Register target_map,
667     AllocationSiteMode mode,
668     Label* allocation_memento_found) {
669   Register scratch_elements = a4;
670   DCHECK(!AreAliased(receiver, key, value, target_map,
671                      scratch_elements));
672 
673   if (mode == TRACK_ALLOCATION_SITE) {
674     __ JumpIfJSArrayHasAllocationMemento(
675         receiver, scratch_elements, allocation_memento_found);
676   }
677 
678   // Set transitioned map.
679   __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
680   __ RecordWriteField(receiver,
681                       HeapObject::kMapOffset,
682                       target_map,
683                       t1,
684                       kRAHasNotBeenSaved,
685                       kDontSaveFPRegs,
686                       EMIT_REMEMBERED_SET,
687                       OMIT_SMI_CHECK);
688 }
689 
690 
691 void ElementsTransitionGenerator::GenerateSmiToDouble(
692     MacroAssembler* masm,
693     Register receiver,
694     Register key,
695     Register value,
696     Register target_map,
697     AllocationSiteMode mode,
698     Label* fail) {
699   // Register ra contains the return address.
700   Label loop, entry, convert_hole, gc_required, only_change_map, done;
701   Register elements = a4;
702   Register length = a5;
703   Register array = a6;
704   Register array_end = array;
705 
706   // target_map parameter can be clobbered.
707   Register scratch1 = target_map;
708   Register scratch2 = t1;
709   Register scratch3 = a7;
710 
711   // Verify input registers don't conflict with locals.
712   DCHECK(!AreAliased(receiver, key, value, target_map,
713                      elements, length, array, scratch2));
714 
715   Register scratch = t2;
716   if (mode == TRACK_ALLOCATION_SITE) {
717     __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
718   }
719 
720   // Check for empty arrays, which only require a map transition and no changes
721   // to the backing store.
722   __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
723   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
724   __ Branch(&only_change_map, eq, at, Operand(elements));
725 
726   __ push(ra);
727   __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
728   // elements: source FixedArray
729   // length: number of elements (smi-tagged)
730 
731   // Allocate new FixedDoubleArray.
732   __ SmiScale(scratch, length, kDoubleSizeLog2);
733   __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
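  // Size sketch: SmiScale converts the smi-tagged length into
  // length * kDoubleSize bytes, so a 4-element array, for example, requests
  // 4 * 8 + FixedDoubleArray::kHeaderSize bytes from Allocate below;
  // DOUBLE_ALIGNMENT keeps the element payload 8-byte aligned.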
734   __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
735   // array: destination FixedDoubleArray, not tagged as heap object
736 
737   // Set destination FixedDoubleArray's length and map.
738   __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
739   __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
740   // Update receiver's map.
741   __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
742 
743   __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
744   __ RecordWriteField(receiver,
745                       HeapObject::kMapOffset,
746                       target_map,
747                       scratch2,
748                       kRAHasBeenSaved,
749                       kDontSaveFPRegs,
750                       OMIT_REMEMBERED_SET,
751                       OMIT_SMI_CHECK);
752   // Replace receiver's backing store with newly created FixedDoubleArray.
753   __ Daddu(scratch1, array, Operand(kHeapObjectTag));
754   __ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
755   __ RecordWriteField(receiver,
756                       JSObject::kElementsOffset,
757                       scratch1,
758                       scratch2,
759                       kRAHasBeenSaved,
760                       kDontSaveFPRegs,
761                       EMIT_REMEMBERED_SET,
762                       OMIT_SMI_CHECK);
763 
764 
765   // Prepare for conversion loop.
766   __ Daddu(scratch1, elements,
767       Operand(FixedArray::kHeaderSize - kHeapObjectTag));
768   __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
769   __ SmiScale(array_end, length, kDoubleSizeLog2);
770   __ Daddu(array_end, array_end, scratch3);
771 
772   // Repurpose registers no longer in use.
773   Register hole_lower = elements;
774   Register hole_upper = length;
775   __ li(hole_lower, Operand(kHoleNanLower32));
776   __ li(hole_upper, Operand(kHoleNanUpper32));
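  // kHoleNanLower32/kHoleNanUpper32 form the dedicated NaN bit pattern that
  // V8 uses as "the hole" sentinel in FixedDoubleArray slots; the
  // convert_hole path below writes it for every hole in the source array.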
777 
778   // scratch1: begin of source FixedArray element fields, not tagged
779   // hole_lower: kHoleNanLower32
780   // hole_upper: kHoleNanUpper32
781   // array_end: end of destination FixedDoubleArray, not tagged
782   // scratch3: begin of FixedDoubleArray element fields, not tagged
783 
784   __ Branch(&entry);
785 
786   __ bind(&only_change_map);
787   __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
788   __ RecordWriteField(receiver,
789                       HeapObject::kMapOffset,
790                       target_map,
791                       scratch2,
792                       kRAHasBeenSaved,
793                       kDontSaveFPRegs,
794                       OMIT_REMEMBERED_SET,
795                       OMIT_SMI_CHECK);
796   __ Branch(&done);
797 
798   // Call into runtime if GC is required.
799   __ bind(&gc_required);
800   __ ld(ra, MemOperand(sp, 0));
801   __ Branch(USE_DELAY_SLOT, fail);
802   __ daddiu(sp, sp, kPointerSize);  // In delay slot.
803 
804   // Convert and copy elements.
805   __ bind(&loop);
806   __ ld(scratch2, MemOperand(scratch1));
807   __ Daddu(scratch1, scratch1, kPointerSize);
808   // scratch2: current element
809   __ JumpIfNotSmi(scratch2, &convert_hole);
810   __ SmiUntag(scratch2);
811 
812   // Normal smi, convert to double and store.
813   __ mtc1(scratch2, f0);
814   __ cvt_d_w(f0, f0);
815   __ sdc1(f0, MemOperand(scratch3));
816   __ Branch(USE_DELAY_SLOT, &entry);
817   __ daddiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
818 
819   // Hole found, store the-hole NaN.
820   __ bind(&convert_hole);
821   if (FLAG_debug_code) {
822     // Restore a "smi-untagged" heap object.
823     __ Or(scratch2, scratch2, Operand(1));
824     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
825     __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
826   }
827   // mantissa
828   __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
829   // exponent
830   __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
831   __ Daddu(scratch3, scratch3, kDoubleSize);
832 
833   __ bind(&entry);
834   __ Branch(&loop, lt, scratch3, Operand(array_end));
835 
836   __ bind(&done);
837   __ pop(ra);
838 }
839 
840 
841 void ElementsTransitionGenerator::GenerateDoubleToObject(
842     MacroAssembler* masm,
843     Register receiver,
844     Register key,
845     Register value,
846     Register target_map,
847     AllocationSiteMode mode,
848     Label* fail) {
849   // Register ra contains the return address.
850   Label entry, loop, convert_hole, gc_required, only_change_map;
851   Register elements = a4;
852   Register array = a6;
853   Register length = a5;
854   Register scratch = t1;
855 
856   // Verify input registers don't conflict with locals.
857   DCHECK(!AreAliased(receiver, key, value, target_map,
858                      elements, array, length, scratch));
859   if (mode == TRACK_ALLOCATION_SITE) {
860     __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
861   }
862 
863   // Check for empty arrays, which only require a map transition and no changes
864   // to the backing store.
865   __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
866   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
867   __ Branch(&only_change_map, eq, at, Operand(elements));
868 
869   __ MultiPush(
870       value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
871 
872   __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
873   // elements: source FixedArray
874   // length: number of elements (smi-tagged)
875 
876   // Allocate new FixedArray.
877   // Re-use value and target_map registers, as they have been saved on the
878   // stack.
879   Register array_size = value;
880   Register allocate_scratch = target_map;
881   __ SmiScale(array_size, length, kPointerSizeLog2);
882   __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
883   __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
884               NO_ALLOCATION_FLAGS);
885   // array: destination FixedArray, not tagged as heap object
886   // Set destination FixedArray's length and map.
887   __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
888   __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
889   __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
890 
891   // Prepare for conversion loop.
892   Register src_elements = elements;
893   Register dst_elements = target_map;
894   Register dst_end = length;
895   Register heap_number_map = scratch;
896   __ Daddu(src_elements, src_elements,
897            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
898   __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
899   __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
900   __ Daddu(dst_end, dst_elements, dst_end);
901 
902   // Allocating heap numbers in the loop below can fail and cause a jump to
903   // gc_required. We can't leave a partly initialized FixedArray behind,
904   // so pessimistically fill it with holes now.
905   Label initialization_loop, initialization_loop_entry;
906   __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
907   __ Branch(&initialization_loop_entry);
908   __ bind(&initialization_loop);
909   __ sd(scratch, MemOperand(dst_elements));
910   __ Daddu(dst_elements, dst_elements, Operand(kPointerSize));
911   __ bind(&initialization_loop_entry);
912   __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
913 
914   __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
915   __ Daddu(array, array, Operand(kHeapObjectTag));
916   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
917   // Using offset addresses.
918   // dst_elements: begin of destination FixedArray element fields, not tagged
919   // src_elements: begin of source FixedDoubleArray element fields, not tagged,
920   //               points to the exponent
921   // dst_end: end of destination FixedArray, not tagged
922   // array: destination FixedArray
923   // heap_number_map: heap number map
924   __ Branch(&entry);
925 
926   // Call into runtime if GC is required.
927   __ bind(&gc_required);
928   __ MultiPop(
929       value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
930 
931   __ Branch(fail);
932 
933   __ bind(&loop);
934   Register upper_bits = key;
935   __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
936   __ Daddu(src_elements, src_elements, kDoubleSize);
937   // upper_bits: current element's upper 32 bit
938   // src_elements: address of next element
939   __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
940 
941   // Non-hole double, copy value into a heap number.
942   Register heap_number = receiver;
943   Register scratch2 = value;
944   Register scratch3 = t2;
945   __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
946                         &gc_required);
947   // heap_number: new heap number
948   // Load current element, src_elements point to next element.
949 
950   __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
951   __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
952 
953   __ mov(scratch2, dst_elements);
954   __ sd(heap_number, MemOperand(dst_elements));
955   __ Daddu(dst_elements, dst_elements, kPointerSize);
956   __ RecordWrite(array,
957                  scratch2,
958                  heap_number,
959                  kRAHasBeenSaved,
960                  kDontSaveFPRegs,
961                  EMIT_REMEMBERED_SET,
962                  OMIT_SMI_CHECK);
963   __ Branch(&entry);
964 
965   // Replace the-hole NaN with the-hole pointer.
966   __ bind(&convert_hole);
967   __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
968   __ sd(scratch2, MemOperand(dst_elements));
969   __ Daddu(dst_elements, dst_elements, kPointerSize);
970 
971   __ bind(&entry);
972   __ Branch(&loop, lt, dst_elements, Operand(dst_end));
973 
974   __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
975   // Replace receiver's backing store with newly created and filled FixedArray.
976   __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
977   __ RecordWriteField(receiver,
978                       JSObject::kElementsOffset,
979                       array,
980                       scratch,
981                       kRAHasBeenSaved,
982                       kDontSaveFPRegs,
983                       EMIT_REMEMBERED_SET,
984                       OMIT_SMI_CHECK);
985   __ pop(ra);
986 
987   __ bind(&only_change_map);
988   // Update receiver's map.
989   __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
990   __ RecordWriteField(receiver,
991                       HeapObject::kMapOffset,
992                       target_map,
993                       scratch,
994                       kRAHasNotBeenSaved,
995                       kDontSaveFPRegs,
996                       OMIT_REMEMBERED_SET,
997                       OMIT_SMI_CHECK);
998 }
999 
1000 
1001 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
1002                                        Register string,
1003                                        Register index,
1004                                        Register result,
1005                                        Label* call_runtime) {
1006   // Fetch the instance type of the receiver into result register.
1007   __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
1008   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
1009 
1010   // We need special handling for indirect strings.
1011   Label check_sequential;
1012   __ And(at, result, Operand(kIsIndirectStringMask));
1013   __ Branch(&check_sequential, eq, at, Operand(zero_reg));
1014 
1015   // Dispatch on the indirect string shape: slice or cons.
1016   Label cons_string;
1017   __ And(at, result, Operand(kSlicedNotConsMask));
1018   __ Branch(&cons_string, eq, at, Operand(zero_reg));
1019 
1020   // Handle slices.
1021   Label indirect_string_loaded;
1022   __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
1023   __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
1024   __ dsra32(at, result, 0);
1025   __ Daddu(index, index, at);
1026   __ jmp(&indirect_string_loaded);
1027 
1028   // Handle cons strings.
1029   // Check whether the right hand side is the empty string (i.e. if
1030   // this is really a flat string in a cons string). If that is not
1031   // the case we would rather go to the runtime system now to flatten
1032   // the string.
1033   __ bind(&cons_string);
1034   __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
1035   __ LoadRoot(at, Heap::kempty_stringRootIndex);
1036   __ Branch(call_runtime, ne, result, Operand(at));
1037   // Get the first of the two strings and load its instance type.
1038   __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
1039 
1040   __ bind(&indirect_string_loaded);
1041   __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
1042   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
1043 
1044   // Distinguish sequential and external strings. Only these two string
1045   // representations can reach here (slices and flat cons strings have been
1046   // reduced to the underlying sequential or external string).
1047   Label external_string, check_encoding;
1048   __ bind(&check_sequential);
1049   STATIC_ASSERT(kSeqStringTag == 0);
1050   __ And(at, result, Operand(kStringRepresentationMask));
1051   __ Branch(&external_string, ne, at, Operand(zero_reg));
1052 
1053   // Prepare sequential strings
1054   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1055   __ Daddu(string,
1056           string,
1057           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1058   __ jmp(&check_encoding);
1059 
1060   // Handle external strings.
1061   __ bind(&external_string);
1062   if (FLAG_debug_code) {
1063     // Assert that we do not have a cons or slice (indirect strings) here.
1064     // Sequential strings have already been ruled out.
1065     __ And(at, result, Operand(kIsIndirectStringMask));
1066     __ Assert(eq, kExternalStringExpectedButNotFound,
1067         at, Operand(zero_reg));
1068   }
1069   // Rule out short external strings.
1070   STATIC_ASSERT(kShortExternalStringTag != 0);
1071   __ And(at, result, Operand(kShortExternalStringMask));
1072   __ Branch(call_runtime, ne, at, Operand(zero_reg));
1073   __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
1074 
1075   Label one_byte, done;
1076   __ bind(&check_encoding);
1077   STATIC_ASSERT(kTwoByteStringTag == 0);
1078   __ And(at, result, Operand(kStringEncodingMask));
1079   __ Branch(&one_byte, ne, at, Operand(zero_reg));
1080   // Two-byte string.
1081   __ dsll(at, index, 1);
1082   __ Daddu(at, string, at);
1083   __ lhu(result, MemOperand(at));
1084   __ jmp(&done);
1085   __ bind(&one_byte);
1086   // One_byte string.
1087   __ Daddu(at, string, index);
1088   __ lbu(result, MemOperand(at));
1089   __ bind(&done);
1090 }
1091 
1092 
1093 static MemOperand ExpConstant(int index, Register base) {
1094   return MemOperand(base, index * kDoubleSize);
1095 }
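// For example, ExpConstant(3, temp3) yields MemOperand(temp3, 3 * kDoubleSize),
// i.e. an offset of 24 bytes from the start of the math_exp_constants table
// (kDoubleSize is 8 bytes).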
1096 
1097 
1098 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
1099                                    DoubleRegister input,
1100                                    DoubleRegister result,
1101                                    DoubleRegister double_scratch1,
1102                                    DoubleRegister double_scratch2,
1103                                    Register temp1,
1104                                    Register temp2,
1105                                    Register temp3) {
1106   DCHECK(!input.is(result));
1107   DCHECK(!input.is(double_scratch1));
1108   DCHECK(!input.is(double_scratch2));
1109   DCHECK(!result.is(double_scratch1));
1110   DCHECK(!result.is(double_scratch2));
1111   DCHECK(!double_scratch1.is(double_scratch2));
1112   DCHECK(!temp1.is(temp2));
1113   DCHECK(!temp1.is(temp3));
1114   DCHECK(!temp2.is(temp3));
1115   DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
1116   DCHECK(!masm->serializer_enabled());  // External references not serializable.
1117 
1118   Label zero, infinity, done;
1119   __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
1120 
1121   __ ldc1(double_scratch1, ExpConstant(0, temp3));
1122   __ BranchF(&zero, NULL, ge, double_scratch1, input);
1123 
1124   __ ldc1(double_scratch2, ExpConstant(1, temp3));
1125   __ BranchF(&infinity, NULL, ge, input, double_scratch2);
1126 
1127   __ ldc1(double_scratch1, ExpConstant(3, temp3));
1128   __ ldc1(result, ExpConstant(4, temp3));
1129   __ mul_d(double_scratch1, double_scratch1, input);
1130   __ add_d(double_scratch1, double_scratch1, result);
1131   __ FmoveLow(temp2, double_scratch1);
1132   __ sub_d(double_scratch1, double_scratch1, result);
1133   __ ldc1(result, ExpConstant(6, temp3));
1134   __ ldc1(double_scratch2, ExpConstant(5, temp3));
1135   __ mul_d(double_scratch1, double_scratch1, double_scratch2);
1136   __ sub_d(double_scratch1, double_scratch1, input);
1137   __ sub_d(result, result, double_scratch1);
1138   __ mul_d(double_scratch2, double_scratch1, double_scratch1);
1139   __ mul_d(result, result, double_scratch2);
1140   __ ldc1(double_scratch2, ExpConstant(7, temp3));
1141   __ mul_d(result, result, double_scratch2);
1142   __ sub_d(result, result, double_scratch1);
1143   // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
1144   DCHECK(*reinterpret_cast<double*>
1145          (ExternalReference::math_exp_constants(8).address()) == 1);
1146   __ Move(double_scratch2, 1.);
1147   __ add_d(result, result, double_scratch2);
1148   __ dsrl(temp1, temp2, 11);
1149   __ Ext(temp2, temp2, 0, 11);
1150   __ Daddu(temp1, temp1, Operand(0x3ff));
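  // At this point temp1 holds the integral power of two and temp2 the low 11
  // bits that index the 2048-entry math_exp_log_table. Adding 0x3ff (1023,
  // the IEEE-754 double exponent bias) and later shifting left by 20 places
  // that power into the exponent field of the upper 32-bit word of the
  // reconstructed double.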
1151 
1152   // Must not call ExpConstant() after overwriting temp3!
1153   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
1154   __ dsll(at, temp2, 3);
1155   __ Daddu(temp3, temp3, Operand(at));
1156   __ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
1157   __ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
1158   // The first word loaded goes into the lower-numbered register.
1159   if (temp2.code() < temp3.code()) {
1160     __ dsll(at, temp1, 20);
1161     __ Or(temp1, temp3, at);
1162     __ Move(double_scratch1, temp2, temp1);
1163   } else {
1164     __ dsll(at, temp1, 20);
1165     __ Or(temp1, temp2, at);
1166     __ Move(double_scratch1, temp3, temp1);
1167   }
1168   __ mul_d(result, result, double_scratch1);
1169   __ BranchShort(&done);
1170 
1171   __ bind(&zero);
1172   __ Move(result, kDoubleRegZero);
1173   __ BranchShort(&done);
1174 
1175   __ bind(&infinity);
1176   __ ldc1(result, ExpConstant(2, temp3));
1177 
1178   __ bind(&done);
1179 }
1180 
1181 #ifdef DEBUG
1182 // nop(CODE_AGE_MARKER_NOP)
1183 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
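// This bit pattern appears to encode "sll zero_reg, at, CODE_AGE_MARKER_NOP",
// i.e. the marking nop that PatchPlatformCodeAge() emits as the first
// instruction of an aged sequence and that IsOld() checks for below.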
1184 #endif
1185 
1186 
1187 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
1188   USE(isolate);
1189   DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
1190   // Since patcher is a large object, allocate it dynamically when needed,
1191   // to avoid overloading the stack in stress conditions.
1192   // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
1193   // the process, before MIPS simulator ICache is setup.
1194   base::SmartPointer<CodePatcher> patcher(
1195       new CodePatcher(isolate, young_sequence_.start(),
1196                       young_sequence_.length() / Assembler::kInstrSize,
1197                       CodePatcher::DONT_FLUSH));
1198   PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
1199   patcher->masm()->Push(ra, fp, cp, a1);
1200   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1201   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1202   patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
1203   patcher->masm()->Daddu(
1204       fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
1205 }
1206 
1207 
1208 #ifdef DEBUG
1209 bool CodeAgingHelper::IsOld(byte* candidate) const {
1210   return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
1211 }
1212 #endif
1213 
1214 
1215 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
1216   bool result = isolate->code_aging_helper()->IsYoung(sequence);
1217   DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
1218   return result;
1219 }
1220 
1221 
1222 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
1223                                MarkingParity* parity) {
1224   if (IsYoungSequence(isolate, sequence)) {
1225     *age = kNoAgeCodeAge;
1226     *parity = NO_MARKING_PARITY;
1227   } else {
1228     Address target_address = Assembler::target_address_at(
1229         sequence + Assembler::kInstrSize);
1230     Code* stub = GetCodeFromTargetAddress(target_address);
1231     GetCodeAgeAndParity(stub, age, parity);
1232   }
1233 }
1234 
1235 
1236 void Code::PatchPlatformCodeAge(Isolate* isolate,
1237                                 byte* sequence,
1238                                 Code::Age age,
1239                                 MarkingParity parity) {
1240   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
1241   if (age == kNoAgeCodeAge) {
1242     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
1243     Assembler::FlushICache(isolate, sequence, young_length);
1244   } else {
1245     Code* stub = GetCodeAgeStub(isolate, age, parity);
1246     CodePatcher patcher(isolate, sequence,
1247                         young_length / Assembler::kInstrSize);
1248     // Mark this code sequence for FindPlatformCodeAgeSequence().
1249     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
1250     // Load the stub address to t9 and call it,
1251     // GetCodeAgeAndParity() extracts the stub address from this instruction.
1252     patcher.masm()->li(
1253         t9,
1254         Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
1255         ADDRESS_LOAD);
1256     patcher.masm()->nop();  // Prevent jalr to jal optimization.
1257     patcher.masm()->jalr(t9, a0);
1258     patcher.masm()->nop();  // Branch delay slot nop.
1259     patcher.masm()->nop();  // Pad the empty space.
1260   }
1261 }
1262 
1263 
1264 #undef __
1265 
1266 }  // namespace internal
1267 }  // namespace v8
1268 
1269 #endif  // V8_TARGET_ARCH_MIPS64
1270