// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/mips/codegen-mips.h"

#if V8_TARGET_ARCH_MIPS

#include <memory>

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips/simulator-mips.h"

namespace v8 {
namespace internal {


#define __ masm.

#if defined(V8_HOST_ARCH_MIPS)
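// Generates an optimized byte-copy stub. On entry a0 holds the destination,
// a1 the source and a2 the byte count; the original destination pointer is
// returned in v0.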
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
    defined(_MIPS_ARCH_MIPS32RX)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  // This code assumes that cache lines are 32 bytes; if the cache line is
  // larger it will not work correctly.
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

    // The size of each prefetch.
    uint32_t pref_chunk = 32;
    // The maximum size of a prefetch; it must not be less than pref_chunk.
    // If the real size of a prefetch is greater than max_pref_size and
    // the kPrefHintPrepareForStore hint is used, the code will not work
    // correctly.
    uint32_t max_pref_size = 128;
    DCHECK(pref_chunk < max_pref_size);

    // pref_limit is set based on the fact that we never use an offset
    // greater than 5 on a store pref and that a single pref can
    // never be larger than max_pref_size.
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
    int32_t pref_hint_load = kPrefHintLoadStreamed;
    int32_t pref_hint_store = kPrefHintPrepareForStore;
    uint32_t loadstore_chunk = 4;

    // The initial prefetches may fetch bytes that are before the buffer being
    // copied. Start copies with an offset of 4 to avoid this situation when
    // using kPrefHintPrepareForStore.
    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);

    // If the size is less than 8 bytes, go to lastb. Regardless of size,
    // copy the dst pointer to v0 for the return value.
    __ slti(t2, a2, 2 * loadstore_chunk);
    __ bne(t2, zero_reg, &lastb);
    __ mov(v0, a0);  // In delay slot.

    // If src and dst have different alignments, go to unaligned; if they
    // have the same alignment (but are not actually aligned) do a partial
    // load/store to make them aligned. If they are both already aligned
    // we can start copying at aligned.
    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.

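    // Copy the 1-3 head bytes with a single partial word access: on
    // little-endian targets lwr/swr transfer the bytes from the unaligned
    // address up to the next word boundary, on big-endian targets lwl/swl
    // do the same.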
    if (kArchEndian == kLittle) {
      __ lwr(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swr(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swl(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    }
    // Now dst and src are both word-aligned. Set a2 to count how many bytes
    // we have to copy after all the 64 byte chunks are copied and a3 to the
    // dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&aligned);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64?
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.

    // When in the loop we prefetch with the kPrefHintPrepareForStore hint,
    // a0+x should be past the "t0-32" address. This means: for x=128 the
    // last "safe" a0 address is "t0-160". Alternatively, for x=64 the last
    // "safe" a0 address is "t0-96". In the current version we will use
    // "pref hint, 128(a0)", so "t0-160" is the limit.
    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);  // t0 is the "past the end" address.
      __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
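    // Main loop: each iteration copies 64 bytes as two groups of eight
    // words, prefetching ahead on both the source and the destination.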
    __ bind(&loop16w);
    __ lw(t0, MemOperand(a1));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&skip_pref);
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));

    __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have src and dest word-aligned but less than 64 bytes to go.
    // Check for a 32 byte chunk and copy it if there is one. Otherwise jump
    // down to chk1w to handle the tail end of the copy.
    __ bind(&chkw);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32?
    __ nop();  // In delay slot.
    __ lw(t0, MemOperand(a1));
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Here we have less than 32 bytes to copy. Set up for a loop to copy
    // one word at a time. Set a2 to count how many bytes we have to copy
    // after all the word chunks are copied and a3 to the dst pointer after
    // all the word chunks have been copied. We will loop, incrementing a0
    // and a1 until a0 equals a3.
    __ bind(&chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

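    // Copy one word per iteration; the store sits in the branch delay slot,
    // hence the -1 word offset after a0 has already been advanced.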
    __ bind(&wordCopy_loop);
    __ lw(t3, MemOperand(a1));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

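    // lastb: copy the remaining tail byte by byte. This is also the entry
    // point for copies shorter than two words.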
    __ bind(&lastb);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ addu(a3, a0, a2);

    __ bind(&lastbloop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &lastbloop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ bind(&leave);
    __ jr(ra);
    __ nop();

    // Unaligned case. Only the dst gets aligned so we need to do partial
    // loads of the source followed by normal stores to the dst (once we
    // have aligned the destination).
    __ bind(&unaligned);
    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &ua_chk16w);
    __ subu(a2, a2, a3);  // In delay slot.

    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swr(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swl(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    }

    // Now the dst (but not the source) is aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&ua_chk16w);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);
      __ Subu(t9, t0, pref_limit);
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }

    __ bind(&ua_loop16w);
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
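    // Each destination word is assembled with an lwr/lwl pair (the mirrored
    // lwl/lwr pair on big-endian): one instruction fetches the bytes from
    // the unaligned address up to the word boundary, its partner fetches the
    // rest from the following aligned word.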
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1));
      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1));
      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
      __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
      __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have less than 64 bytes to go. Check for a 32 byte chunk and
    // copy it if there is one. Otherwise jump down to ua_chk1w to handle the
    // tail end of the copy.
    __ bind(&ua_chkw);
    __ Pref(pref_hint_load, MemOperand(a1));
    __ andi(t8, a2, 0x1f);

    __ beq(a2, t8, &ua_chk1w);
    __ nop();  // In delay slot.
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1));
      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1));
      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

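    // Copy the remaining full words one at a time, still using partial loads
    // because the source is not word-aligned.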
    __ bind(&ua_wordCopy_loop);
    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the remaining tail bytes one at a time.
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
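  // Finalize the generated code: flush the instruction cache for the buffer
  // and make it executable before handing back a raw function pointer.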
  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif

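// Generates a stub that computes the square root of a double: the argument
// arrives in f12, sqrt_d leaves the result in f0, and the value is returned
// through the standard FP result register.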
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

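// Loads the character at |index| from |string| into |result|. Indirect string
// shapes (sliced, thin and cons) are unwrapped in a loop; anything that cannot
// be handled inline bails out to |call_runtime|.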
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  Label indirect_string_loaded;
  __ bind(&indirect_string_loaded);

  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string, thin_string;
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
  __ Branch(&thin_string, eq, at, Operand(kThinStringTag));

  // Handle slices.
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle thin strings.
  __ bind(&thin_string);
  __ lw(string, FieldMemOperand(string, ThinString::kActualOffset));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
  __ jmp(&indirect_string_loaded);

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&one_byte, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ Lsa(at, string, index, 1);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}

#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the MIPS simulator ICache is set up.
  std::unique_ptr<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(a1);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;

  Address target_address =
      Assembler::target_address_at(sequence + Assembler::kInstrSize);
  Code* stub = GetCodeFromTargetAddress(target_address);
  return GetAgeOfCodeAgeStub(stub);
}

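// Rewrites the code-age sequence in place: restore the young prologue when
// age == kNoAgeCodeAge, otherwise patch in a call to the matching code-age
// stub so FindPlatformCodeAgeSequence() and GetCodeAge() can recognize it.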
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence().
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address to t9 and call it;
    // GetCodeAge() extracts the stub address from this instruction.
    patcher.masm()->li(
        t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
        CONSTANT_SIZE);
    patcher.masm()->nop();  // Prevent jalr to jal optimization.
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();  // Branch delay slot nop.
    patcher.masm()->nop();  // Pad the empty space.
  }
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS