// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
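// "__" is shorthand for the current MacroAssembler: "__ sw(...)" emits code
// through masm.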

UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  switch (type) {
    case TranscendentalCache::SIN: return &sin;
    case TranscendentalCache::COS: return &cos;
    case TranscendentalCache::TAN: return &tan;
    case TranscendentalCache::LOG: return &log;
    default: UNIMPLEMENTED();
  }
  return NULL;
}

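// Square root likewise has no specialized MIPS code path; fall back to the
// C library's sqrt.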
UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

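// These hooks bracket runtime calls made from stubs: they enter/leave an
// INTERNAL frame and keep the assembler's has_frame() flag consistent.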
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}

// -------------------------------------------------------------------------
// Code generators

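// Transitions the receiver's elements from FAST_SMI_ONLY_ELEMENTS to
// FAST_ELEMENTS. Smis are valid tagged values in a FixedArray, so the
// backing store can be reused unchanged; only the receiver's map is updated
// (with a write barrier).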
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  // Set transitioned map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


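// Transitions the receiver's elements from FAST_SMI_ONLY_ELEMENTS to
// FAST_DOUBLE_ELEMENTS: allocates a FixedDoubleArray, converts each smi to a
// double (holes become the hole NaN), and installs the new backing store.
// Jumps to |fail| if the allocation would require a GC.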
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  bool fpu_supported = CpuFeatures::IsSupported(FPU);

  Register scratch = t6;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ push(ra);
  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
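  // t1 holds the smi-tagged length (length << 1); shifting left by two more
  // yields length * kDoubleSize (8 bytes per element).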
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedDoubleArray, not tagged as heap object
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(a3, t2, Operand(kHeapObjectTag));
  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(t2, t1, 2);
  __ Addu(t2, t2, t3);
  __ li(t0, Operand(kHoleNanLower32));
  __ li(t1, Operand(kHoleNanUpper32));
  // t0: kHoleNanLower32
  // t1: kHoleNanUpper32
  // t2: end of destination FixedDoubleArray, not tagged
  // t3: begin of FixedDoubleArray element fields, not tagged

  if (!fpu_supported) __ Push(a1, a0);

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ra);
  __ Branch(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(t5, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  // t5: current element
  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  // Normal smi, convert to double and store.
  if (fpu_supported) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(t5, f0);
    __ cvt_d_w(f0, f0);
    __ sdc1(f0, MemOperand(t3));
    __ Addu(t3, t3, kDoubleSize);
  } else {
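    // No FPU available: convert via the core-register helper, which returns
    // the raw double as two 32-bit words in a0 (mantissa) and a1 (exponent).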
    FloatingPointHelper::ConvertIntToDouble(masm,
                                            t5,
                                            FloatingPointHelper::kCoreRegisters,
                                            f0,
                                            a0,
                                            a1,
                                            t7,
                                            f0);
    __ sw(a0, MemOperand(t3));  // mantissa
    __ sw(a1, MemOperand(t3, kIntSize));  // exponent
    __ Addu(t3, t3, kDoubleSize);
  }
  __ Branch(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(t5);
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, "object found in smi-only array", at, Operand(t5));
  }
  __ sw(t0, MemOperand(t3));  // mantissa
  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
  __ Addu(t3, t3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, t3, Operand(t2));

  if (!fpu_supported) __ Pop(a1, a0);
  __ pop(ra);
  __ bind(&done);
}


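// Transitions the receiver's elements from FAST_DOUBLE_ELEMENTS to
// FAST_ELEMENTS: allocates a FixedArray, boxes each double into a fresh
// HeapNumber (hole NaNs become the-hole value), and installs the new backing
// store. Jumps to |fail| if an allocation would require a GC.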
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedArray.
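  // t1 holds the smi-tagged length (length << 1); one more shift gives
  // length * kPointerSize. FixedArray and FixedDoubleArray share the same
  // header layout (map + length), so FixedDoubleArray::kHeaderSize is also
  // correct here.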
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
  __ Addu(t2, t2, Operand(kHeapObjectTag));
  __ sll(t1, t1, 1);
  __ Addu(t1, a3, t1);
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // a3: begin of destination FixedArray element fields, not tagged
  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
  // t1: end of destination FixedArray, not tagged
  // t2: destination FixedArray
  // t3: the-hole pointer
  // t5: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  __ lw(a1, MemOperand(t0));
  __ Addu(t0, t0, kDoubleSize);
  // a1: current element's upper 32 bit
  // t0: address of next element's upper 32 bit
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
  // a2: new heap number
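  // The current element's lower (mantissa) word is at t0 - 12: t0 has
  // already advanced kDoubleSize past the current upper word, and the lower
  // word precedes the upper word by 4 bytes.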
  __ lw(a0, MemOperand(t0, -12));
  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
  __ mov(a0, a3);
  __ sw(a2, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  __ RecordWrite(t2,
                 a0,
                 a2,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ sw(t3, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      t2,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


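// Loads the character at |index| of |string| into |result|. Follows sliced
// and flat cons strings down to the underlying sequential or external
// string, and bails out to |call_runtime| for anything that must be
// flattened or handled in C++ (e.g. short external strings).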
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
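  // The slice offset is a smi; untag it before adding it to the index.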
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kEmptyStringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, "external string expected, but not found",
              at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
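  // An external string's characters live off-heap; load the pointer to the
  // resource data.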
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
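  // Each character is two bytes wide; scale the index accordingly.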
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS