// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/s390/codegen-s390.h"

#if V8_TARGET_ARCH_S390

#include <memory>

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"

namespace v8 {
namespace internal {

#define __ masm.

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

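  // The generated stub computes d0 = sqrt(d0): load the incoming double
  // argument, take its square root with SQDBR, and return the result.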
  __ MovFromFloatParameter(d0);
  __ sqdbr(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r6;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r6;
  Register length = r7;
  Register array = r8;
  Register array_end = array;
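  // array_end aliases array: the raw allocation pointer is no longer needed
  // once the end-of-destination address has been computed from it below.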

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r1;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map, Label::kNear);

  // Preserve lr and use r14 as a temporary register.
  __ push(r14);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
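  // r14 = length * kDoubleSize + header size, the allocation size in bytes
  // (SmiToDoubleArrayOffset untags the smi length while scaling it).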
  __ SmiToDoubleArrayOffset(r14, length);
  __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
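  // Allocate returns a tagged pointer; strip the tag so the length and map
  // stores below can use plain MemOperands. The tag is added back before the
  // new array is installed as the receiver's backing store.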
  __ SubP(array, array, Operand(kHeapObjectTag));
  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ AddP(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ AddP(target_map, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array, length);
  __ AddP(array_end, r9, array);
  // Repurpose registers no longer in use.
#if V8_TARGET_ARCH_S390X
  Register hole_int64 = elements;
#else
  Register hole_lower = elements;
  Register hole_upper = length;
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 (hole_int64: kHoleNanInt64 on 64-bit)
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // r9: begin of destination FixedDoubleArray element fields, not tagged

  __ b(&entry, Label::kNear);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done, Label::kNear);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(r14);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(r14, MemOperand(scratch1));
  __ la(scratch1, MemOperand(scratch1, kPointerSize));
  // r14: current element
  __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(r14, d0);
  __ StoreDouble(d0, MemOperand(r9, 0));
  __ la(r9, MemOperand(r9, 8));

  __ b(&entry, Label::kNear);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ LoadP(r1, MemOperand(r5, -kPointerSize));
    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_S390X
  __ stg(hole_int64, MemOperand(r9, 0));
#else
  __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
  __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
#endif
  __ AddP(r9, Operand(8));

  __ bind(&entry);
  __ CmpP(r9, array_end);
  __ blt(&loop);

  __ pop(r14);
  __ bind(&done);
}

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r6;
  Register array = r8;
  Register length = r7;
  Register scratch = r1;
  Register scratch3 = r9;
  Register hole_value = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
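  // array_size = header size + length * kPointerSize; length is smi-tagged,
  // so SmiToPtrArrayOffset untags it while scaling it to a byte offset.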
  __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ AddP(array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
            r0);
  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ AddP(src_elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
  __ beq(&loop_done, Label::kNear);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ AddP(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
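  // dst_elements is biased by -kPointerSize so the store in the loop below can
  // use a kPointerSize displacement before the pointer is advanced.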
  __ bind(&initialization_loop);
  __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
  __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
  __ BranchOnCount(scratch, &initialization_loop);

  __ AddP(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ AddP(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Use offset addressing in src_elements to take full advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop, Label::kNear);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ AddP(dst_elements, Operand(kPointerSize));
  __ CmpLogicalP(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ AddP(src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
  __ beq(&convert_hole, Label::kNear);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_S390X
  __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
  // Subtract the heap-object tag so stg can store to the untagged address.
  __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ LoadlW(scratch2,
            MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ LoadlW(upper_bits,
            MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ StoreW(scratch2,
            FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ StoreW(upper_bits,
            FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ LoadRR(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ AddP(dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ CmpLogicalP(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

// Assume ip can be used as a scratch register below.
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ mov(r0, Operand(kIsIndirectStringMask));
  __ AndP(r0, result);
  __ beq(&check_sequential, Label::kNear /*, cr0*/);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ LoadRR(r0, result);
  __ AndP(r0, ip /*, SetRC*/);  // Should be okay to remove RC
  __ beq(&cons_string, Label::kNear /*, cr0*/);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ AddP(index, ip);
  __ b(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ mov(r0, Operand(kStringRepresentationMask));
  __ AndP(r0, result);
  __ bne(&external_string, Label::kNear);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding, Label::kNear);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ mov(r0, Operand(kIsIndirectStringMask));
    __ AndP(r0, result);
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ mov(r0, Operand(kShortExternalStringMask));
  __ AndP(r0, result);
  __ bne(call_runtime /*, cr0*/);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ mov(r0, Operand(kStringEncodingMask));
  __ AndP(r0, result);
  __ bne(&one_byte, Label::kNear);
  // Two-byte string.
  __ ShiftLeftP(result, index, Operand(1));
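  // result now holds index * 2, the byte offset of the requested character.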
  __ LoadLogicalHalfWordP(result, MemOperand(string, result));
  __ b(&done, Label::kNear);
  __ bind(&one_byte);
  // One-byte string.
  __ LoadlB(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator ICache is set up.
  std::unique_ptr<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length(), CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(r3);
}

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = NULL;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // We need to push lr on the stack so that GenerateMakeCodeYoungAgainCommon
    // knows where to pick up the return address.
    //
    // Since we can no longer guarantee that ip will hold the branch address
    // because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
    // can calculate the branch address offset.
    patcher.masm()->nop();  // Marker to detect the sequence (see IsOld).
    patcher.masm()->CleanseP(r14);
    patcher.masm()->Push(r14);
    patcher.masm()->mov(r2, Operand(target));
    patcher.masm()->Call(r2);
    for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
         i += 2) {
      // TODO(joransiu): Create a nop function to pad out
      // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
      patcher.masm()->nop();  // 2-byte nops.
    }
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390