1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
// The original source code covered by the above license has been
// modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36
37 #include "src/codegen/s390/assembler-s390.h"
38 #include <set>
39 #include <string>
40
41 #if V8_TARGET_ARCH_S390
42
43 #if V8_HOST_ARCH_S390
44 #include <elf.h> // Required for auxv checks for STFLE support
45 #include <sys/auxv.h>
46 #endif
47
48 #include "src/base/bits.h"
49 #include "src/base/cpu.h"
50 #include "src/codegen/macro-assembler.h"
51 #include "src/codegen/s390/assembler-s390-inl.h"
52 #include "src/codegen/string-constants.h"
53 #include "src/deoptimizer/deoptimizer.h"
54
55 namespace v8 {
56 namespace internal {
57
58 // Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  // The build configuration implies no s390 CPU features; everything is
  // detected at runtime in ProbeImpl().
  return 0u;
}
63
// Returns whether the named hardware capability is present on this host.
// On s390 hosts the answer comes from the kernel's HWCAP auxiliary vector;
// on all other hosts (i.e. the simulator) every known feature is reported
// as available.
static bool supportsCPUFeature(const char* feature) {
  // Deliberately leaked: these sets live for the whole process lifetime.
  static std::set<std::string>& features = *new std::set<std::string>();
  static std::set<std::string>& all_available_features =
      *new std::set<std::string>({"iesan3", "zarch", "stfle", "msa", "ldisp",
                                  "eimm", "dfp", "etf3eh", "highgprs", "te",
                                  "vx"});
  if (features.empty()) {
#if V8_HOST_ARCH_S390

// HWCAP_S390_VX may be missing from older kernel headers; the value is
// stable, so define it here if needed.
#ifndef HWCAP_S390_VX
#define HWCAP_S390_VX 2048
#endif
#define CHECK_AVAILABILITY_FOR(mask, value) \
  if (f & mask) features.insert(value);

    // initialize feature vector from the auxiliary vector's HWCAP entry
    uint64_t f = getauxval(AT_HWCAP);
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ESAN3, "iesan3")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ZARCH, "zarch")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_STFLE, "stfle")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_MSA, "msa")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_LDISP, "ldisp")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_EIMM, "eimm")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_DFP, "dfp")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ETF3EH, "etf3eh")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_HIGH_GPRS, "highgprs")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_TE, "te")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_VX, "vx")
#else
    // import all features (simulator: pretend everything is supported)
    features.insert(all_available_features.begin(),
                    all_available_features.end());
#endif
  }
  USE(all_available_features);  // Otherwise unused on s390 hosts.
  return features.find(feature) != features.end();
}
101
102 #undef CHECK_AVAILABILITY_FOR
103 #undef HWCAP_S390_VX
104
105 // Check whether Store Facility STFLE instruction is available on the platform.
106 // Instruction returns a bit vector of the enabled hardware facilities.
// Check whether the Store Facility List Extended (STFLE) instruction is
// available on the platform; STFLE returns a bit vector of the enabled
// hardware facilities.
//
// Availability is advertised by the kernel via the AT_HWCAP auxiliary
// vector entry. The previous implementation parsed /proc/self/auxv by hand
// and its loop bound (`auxv_element + sizeof(auxv_element)`) advanced the
// pointer in *elements* rather than bytes, overscanning the buffer.
// getauxval() — already used by supportsCPUFeature() above — does the same
// lookup correctly and without file I/O.
static bool supportsSTFLE() {
#if V8_HOST_ARCH_S390
  // Read once and cache; the auxiliary vector does not change at runtime.
  static uint32_t auxv_hwcap = static_cast<uint32_t>(getauxval(AT_HWCAP));

  // HWCAP_S390_STFLE is defined to be 4 in include/asm/elf.h. Currently
  // hardcoded in case that include file does not exist.
  const uint32_t _HWCAP_S390_STFLE = 4;
  return (auxv_hwcap & _HWCAP_S390_STFLE) != 0;
#else
  // STFLE is not available on non-s390 hosts
  return false;
#endif
}
161
// Probe the host CPU for supported features and record them in supported_.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 256;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifdef DEBUG
  initialized_ = true;
#endif

  static bool performSTFLE = supportsSTFLE();

// Need to define host, as we are generating inlined S390 assembly to test
// for facilities.
#if V8_HOST_ARCH_S390
  if (performSTFLE) {
    // STFLE D(B) requires:
    //  GPR0 to specify # of double words to update minus 1.
    //  i.e. GPR0 = 2 requests 3 doublewords, matching facilities[3] below.
    //  D(B) to specify the memory location to store the facilities bits.
    // The facility bits checked below live in doublewords 0 and 2
    // (e.g. Bit 45 - Distinct Operands, Bit 129 - Vector Facility).
    int64_t facilities[3] = {0L};
    int16_t reg0;
    // LHI sets up GPR0
    // STFLE is specified as .insn, as opcode is not recognized.
    // We register that the instructions kill r0 (LHI) and the CC (STFLE).
    asm volatile(
        "lhi %%r0,2\n"
        ".insn s,0xb2b00000,%0\n"
        : "=Q"(facilities), "=r"(reg0)
        :
        : "cc", "r0");

    uint64_t one = static_cast<uint64_t>(1);
    // Test for Distinct Operands Facility - Bit 45
    if (facilities[0] & (one << (63 - 45))) {
      supported_ |= (1u << DISTINCT_OPS);
    }
    // Test for General Instruction Extension Facility - Bit 34
    if (facilities[0] & (one << (63 - 34))) {
      supported_ |= (1u << GENERAL_INSTR_EXT);
    }
    // Test for Floating Point Extension Facility - Bit 37
    if (facilities[0] & (one << (63 - 37))) {
      supported_ |= (1u << FLOATING_POINT_EXT);
    }
    // Test for Vector Facility - Bit 129 (doubleword 2); additionally the
    // kernel must advertise "vx" (vector regs enabled for user space).
    if (facilities[2] & (one << (63 - (129 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_FACILITY);
    }
    // Test for Vector Enhancement Facility 1 - Bit 135
    if (facilities[2] & (one << (63 - (135 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
    }
    // Test for Vector Enhancement Facility 2 - Bit 148
    if (facilities[2] & (one << (63 - (148 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
    }
    // Test for Miscellaneous Instruction Extension Facility - Bit 58
    if (facilities[0] & (1lu << (63 - 58))) {
      supported_ |= (1u << MISC_INSTR_EXT2);
    }
  }
#else
  // All distinct ops instructions can be simulated
  supported_ |= (1u << DISTINCT_OPS);
  // RISBG can be simulated
  supported_ |= (1u << GENERAL_INSTR_EXT);
  supported_ |= (1u << FLOATING_POINT_EXT);
  supported_ |= (1u << MISC_INSTR_EXT2);
  USE(performSTFLE);  // To avoid assert
  USE(supportsCPUFeature);
  supported_ |= (1u << VECTOR_FACILITY);
  supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
#endif
  // Basic floating point support is always assumed present.
  supported_ |= (1u << FPU);
}
245
PrintTarget()246 void CpuFeatures::PrintTarget() {
247 const char* s390_arch = nullptr;
248
249 #if V8_TARGET_ARCH_S390X
250 s390_arch = "s390x";
251 #else
252 s390_arch = "s390";
253 #endif
254
255 PrintF("target %s\n", s390_arch);
256 }
257
PrintFeatures()258 void CpuFeatures::PrintFeatures() {
259 PrintF("FPU=%d\n", CpuFeatures::IsSupported(FPU));
260 PrintF("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
261 PrintF("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
262 PrintF("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
263 PrintF("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
264 PrintF("VECTOR_ENHANCE_FACILITY_1=%d\n",
265 CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
266 PrintF("VECTOR_ENHANCE_FACILITY_2=%d\n",
267 CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2));
268 PrintF("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
269 }
270
// Map a numeric register code (0..15) to the corresponding Register.
// Codes 11, 12 and 15 are supplied via their aliases fp, ip and sp.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0, r1, r2, r3, r4, r5, r6, r7,
                                 r8, r9, r10, fp, ip, r13, r14, sp};
  return kRegisters[num];
}
277
278 // -----------------------------------------------------------------------------
279 // Implementation of RelocInfo
280
// Relocation modes that need fixing up when the code object moves:
// code targets and internal references.
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
284
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded. Being specially coded on S390 means that it is an iihf/iilf
  // instruction sequence (the FIXED_SEQUENCE checked by Is64BitLoadIntoIP),
  // and that is always the case inside code objects.
  return true;
}
292
IsInConstantPool()293 bool RelocInfo::IsInConstantPool() { return false; }
294
wasm_call_tag() const295 uint32_t RelocInfo::wasm_call_tag() const {
296 DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
297 return static_cast<uint32_t>(
298 Assembler::target_address_at(pc_, constant_pool_));
299 }
300
301 // -----------------------------------------------------------------------------
302 // Implementation of Operand and MemOperand
303 // See assembler-s390-inl.h for inlined constructors
304
// Build an Operand that embeds a heap object: the handle's current address
// becomes a full-pointer immediate with FULL_EMBEDDED_OBJECT reloc mode.
Operand::Operand(Handle<HeapObject> handle) {
  AllowHandleDereference using_location;
  rm_ = no_reg;  // immediate operand, no register
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
311
EmbeddedNumber(double value)312 Operand Operand::EmbeddedNumber(double value) {
313 int32_t smi;
314 if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
315 Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
316 result.is_heap_object_request_ = true;
317 result.value_.heap_object_request = HeapObjectRequest(value);
318 return result;
319 }
320
EmbeddedStringConstant(const StringConstantBase * str)321 Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
322 Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
323 result.is_heap_object_request_ = true;
324 result.value_.heap_object_request = HeapObjectRequest(str);
325 return result;
326 }
327
// D(B) form: base register plus displacement; the index register is r0,
// which denotes "no index" in s390 addressing.
MemOperand::MemOperand(Register rn, int32_t offset)
    : baseRegister(rn), indexRegister(r0), offset_(offset) {}
330
// D(X,B) form: base register plus index register plus displacement.
MemOperand::MemOperand(Register rx, Register rb, int32_t offset)
    : baseRegister(rb), indexRegister(rx), offset_(offset) {}
333
// Allocate each deferred heap object recorded during assembly (HeapNumbers
// and string constants) and patch its address into the code at the
// request's offset.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    // Address of the instruction that embeds the object pointer.
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        set_target_address_at(pc, kNullAddress, object.address(),
                              SKIP_ICACHE_FLUSH);
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        set_target_address_at(pc, kNullAddress,
                              str->AllocateStringConstant(isolate).address());
        break;
      }
    }
  }
}
357
358 // -----------------------------------------------------------------------------
359 // Specific instructions, constants, and masks.
360
// Construct an assembler writing into the given buffer; ip starts out as
// the only available scratch register.
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_(ip.bit()) {
  // Relocation info is written backwards from the end of the buffer.
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
  last_bound_pos_ = 0;
  // Pre-reserve to avoid repeated reallocation while assembling.
  relocations_.reserve(128);
}
369
// Finalize assembly: flush relocations, emit code comments, install
// deferred heap objects, and fill in the CodeDesc describing the layout of
// the generated code and its trailing metadata sections.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to the
  // (larger) kCodeAlignment anyways.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  EmitRelocations();

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  // Metadata sections are laid out back-to-front from the end of the
  // instruction stream; s390 has no constant pool, so its size is zero.
  static constexpr int kConstantPoolSize = 0;
  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->GetCodeOffset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}
409
Align(int m)410 void Assembler::Align(int m) {
411 DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
412 while ((pc_offset() & (m - 1)) != 0) {
413 nop(0);
414 }
415 }
416
CodeTargetAlign()417 void Assembler::CodeTargetAlign() { Align(8); }
418
GetCondition(Instr instr)419 Condition Assembler::GetCondition(Instr instr) {
420 switch (instr & kCondMask) {
421 case BT:
422 return eq;
423 case BF:
424 return ne;
425 default:
426 UNIMPLEMENTED();
427 }
428 return al;
429 }
430
431 #if V8_TARGET_ARCH_S390X
432 // This code assumes a FIXED_SEQUENCE for 64bit loads (iihf/iilf)
Is64BitLoadIntoIP(SixByteInstr instr1,SixByteInstr instr2)433 bool Assembler::Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2) {
434 // Check the instructions are the iihf/iilf load into ip
435 return (((instr1 >> 32) == 0xC0C8) && ((instr2 >> 32) == 0xC0C9));
436 }
437 #else
438 // This code assumes a FIXED_SEQUENCE for 32bit loads (iilf)
Is32BitLoadIntoIP(SixByteInstr instr)439 bool Assembler::Is32BitLoadIntoIP(SixByteInstr instr) {
440 // Check the instruction is an iilf load into ip/r12.
441 return ((instr >> 32) == 0xC0C9);
442 }
443 #endif
444
445 // Labels refer to positions in the (to be) generated code.
446 // There are bound, linked, and unused labels.
447 //
448 // Bound labels refer to known positions in the already
449 // generated code. pos() is the position the label refers to.
450 //
451 // Linked labels refer to unknown positions in the code
452 // to be generated; pos() is the position of the last
453 // instruction using the label.
454
// The link chain is terminated by a negative code position (must be aligned).
// target_at() returns this sentinel when a link's displacement is zero.
const int kEndOfChain = -4;
457
458 // Returns the target address of the relative instructions, typically
459 // of the form: pos + imm (where immediate is in # of halfwords for
460 // BR* and LARL).
int Assembler::target_at(int pos) {
  SixByteInstr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);

  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
    // 16-bit relative displacement in the instruction's low halfword.
    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 <<= 1;  // immediate is in # of halfwords
    // A zero displacement marks the end of the label's link chain.
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
             BRASL == opcode) {
    // 32-bit immediate in the low 4 bytes of the 6-byte instruction.
    int32_t imm32 =
        static_cast<int32_t>(instr & (static_cast<uint64_t>(0xFFFFFFFF)));
    if (LLILF != opcode)
      imm32 <<= 1;  // BR* + LARL treat immediate in # of halfwords
    if (imm32 == 0) return kEndOfChain;
    return pos + imm32;
  } else if (BRXHG == opcode) {
    // offset is in bits 16-31 of 48 bit instruction
    instr = instr >> 16;
    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 <<= 1;  // immediate is in # of halfwords
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  }

  // Unknown condition
  DCHECK(false);
  return -1;
}
492
493 // Update the target address of the current relative instruction.
// Update the target address of the current relative instruction.
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  SixByteInstr instr = instr_at(pos);
  Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);

  // Tell the caller whether the patched instruction was a branch (used by
  // bind_to for branch tracking).
  if (is_branch != nullptr) {
    *is_branch =
        (opcode == BRC || opcode == BRCT || opcode == BRCTG || opcode == BRCL ||
         opcode == BRASL || opcode == BRXH || opcode == BRXHG);
  }

  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
    // 16-bit relative branch: displacement stored in halfwords.
    int16_t imm16 = target_pos - pos;
    instr &= (~0xFFFF);
    DCHECK(is_int16(imm16));
    instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
    return;
  } else if (BRCL == opcode || LARL == opcode || BRASL == opcode) {
    // Immediate is in # of halfwords
    int32_t imm32 = target_pos - pos;
    instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
    instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
    return;
  } else if (LLILF == opcode) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
    instr_at_put<SixByteInstr>(pos, instr | imm32);
    return;
  } else if (BRXHG == opcode) {
    // Immediate is in bits 16-31 of 48 bit instruction
    int32_t imm16 = target_pos - pos;
    instr &= (0xFFFF0000FFFF);  // clear bits 16-31
    imm16 &= 0xFFFF;            // clear high halfword
    imm16 <<= 16;
    // Immediate is in # of halfwords
    instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
    return;
  }
  DCHECK(false);
}
536
537 // Returns the maximum number of bits given instruction can address.
max_reach_from(int pos)538 int Assembler::max_reach_from(int pos) {
539 Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
540 // Check which type of instr. In theory, we can return
541 // the values below + 1, given offset is # of halfwords
542 if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode ||
543 BRXHG == opcode) {
544 return 16;
545 } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
546 BRASL == opcode) {
547 return 31; // Using 31 as workaround instead of 32 as
548 // is_intn(x,32) doesn't work on 32-bit platforms.
549 // llilf: Emitted label constant, not part of
550 // a branch (regexp PushBacktrack).
551 }
552 DCHECK(false);
553 return 16;
554 }
555
// Bind label L to position pos, patching every instruction on the label's
// link chain to target that position.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
#ifdef DEBUG
    // Only needed for the reach check below; DCHECK is a no-op in release.
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
#endif
    next(L);  // call next before overwriting link with target at fixup_pos
    // The displacement must fit in the instruction's immediate field.
    DCHECK(is_intn(offset, maxReach));
    target_at_put(fixup_pos, pos, &is_branch);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
575
bind(Label * L)576 void Assembler::bind(Label* L) {
577 DCHECK(!L->is_bound()); // label can only be bound once
578 bind_to(L, pc_offset());
579 }
580
next(Label * L)581 void Assembler::next(Label* L) {
582 DCHECK(L->is_linked());
583 int link = target_at(L->pos());
584 if (link == kEndOfChain) {
585 L->Unuse();
586 } else {
587 DCHECK_GE(link, 0);
588 L->link_to(link);
589 }
590 }
591
link(Label * L)592 int Assembler::link(Label* L) {
593 int position;
594 if (L->is_bound()) {
595 position = L->pos();
596 } else {
597 if (L->is_linked()) {
598 position = L->pos(); // L's link
599 } else {
600 // was: target_pos = kEndOfChain;
601 // However, using self to mark the first reference
602 // should avoid most instances of branch offset overflow. See
603 // target_at() for where this is converted back to kEndOfChain.
604 position = pc_offset();
605 }
606 L->link_to(pc_offset());
607 }
608
609 return position;
610 }
611
load_label_offset(Register r1,Label * L)612 void Assembler::load_label_offset(Register r1, Label* L) {
613 int target_pos;
614 int constant;
615 if (L->is_bound()) {
616 target_pos = L->pos();
617 constant = target_pos + (Code::kHeaderSize - kHeapObjectTag);
618 } else {
619 if (L->is_linked()) {
620 target_pos = L->pos(); // L's link
621 } else {
622 // was: target_pos = kEndOfChain;
623 // However, using branch to self to mark the first reference
624 // should avoid most instances of branch offset overflow. See
625 // target_at() for where this is converted back to kEndOfChain.
626 target_pos = pc_offset();
627 }
628 L->link_to(pc_offset());
629
630 constant = target_pos - pc_offset();
631 }
632 llilf(r1, Operand(constant));
633 }
634
635 // Pseudo op - branch on condition
branchOnCond(Condition c,int branch_offset,bool is_bound)636 void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
637 int offset_in_halfwords = branch_offset / 2;
638 if (is_bound && is_int16(offset_in_halfwords)) {
639 brc(c, Operand(offset_in_halfwords)); // short jump
640 } else {
641 brcl(c, Operand(offset_in_halfwords)); // long jump
642 }
643 }
644
645 // Exception-generating instructions and debugging support.
646 // Stops with a non-negative code less than kNumOfWatchedStops support
647 // enabling/disabling and a counter feature. See simulator-s390.h .
stop(Condition cond,int32_t code,CRegister cr)648 void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
649 if (cond != al) {
650 Label skip;
651 b(NegateCondition(cond), &skip, Label::kNear);
652 bkpt(0);
653 bind(&skip);
654 } else {
655 bkpt(0);
656 }
657 }
658
void Assembler::bkpt(uint32_t imm16) {
  // GDB software breakpoint instruction. The immediate is currently unused;
  // a fixed 2-byte pattern is always emitted.
  emit2bytes(0x0001);
}
663
664 // Pseudo instructions.
nop(int type)665 void Assembler::nop(int type) {
666 switch (type) {
667 case 0:
668 lr(r0, r0);
669 break;
670 case DEBUG_BREAK_NOP:
671 // TODO(john.yan): Use a better NOP break
672 oill(r3, Operand::Zero());
673 break;
674 default:
675 UNIMPLEMENTED();
676 }
677 }
678
679 // -------------------------
680 // Load Address Instructions
681 // -------------------------
682 // Load Address Relative Long
larl(Register r1,Label * l)683 void Assembler::larl(Register r1, Label* l) {
684 larl(r1, Operand(branch_offset(l)));
685 }
686
EnsureSpaceFor(int space_needed)687 void Assembler::EnsureSpaceFor(int space_needed) {
688 if (buffer_space() <= (kGap + space_needed)) {
689 GrowBuffer(space_needed);
690 }
691 }
692
// Emit a relocated call to a Code target: brasl (branch relative and save
// long) through r14, with the operand being an index into the code-target
// table that is fixed up at relocation time.
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  EnsureSpace ensure_space(this);

  RecordRelocInfo(rmode);
  int32_t target_index = AddCodeTarget(target);
  brasl(r14, Operand(target_index));
}
701
// Emit a relocated (conditional) jump to a Code target via brcl; the
// operand is an index into the code-target table, fixed up at relocation
// time.
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
                     Condition cond) {
  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
  EnsureSpace ensure_space(this);

  RecordRelocInfo(rmode);
  int32_t target_index = AddCodeTarget(target);
  brcl(cond, Operand(target_index));
}
711
712 // end of S390instructions
713
IsNop(SixByteInstr instr,int type)714 bool Assembler::IsNop(SixByteInstr instr, int type) {
715 DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
716 if (DEBUG_BREAK_NOP == type) {
717 return ((instr & 0xFFFFFFFF) == 0xA53B0000); // oill r3, 0
718 }
719 return ((instr & 0xFFFF) == 0x1800); // lr r0,r0
720 }
721
722 // dummy instruction reserved for special use.
// Assembles a 6-byte instruction image (opcode halves 0xE3/0x53) from the
// given register/index/base/displacement fields. Only emitted under the
// simulator; a no-op in real-hardware builds.
void Assembler::dumy(int r1, int x2, int b2, int d2) {
#if defined(USE_SIMULATOR)
  int op = 0xE353;
  // Pack the fields into their bit positions (B16/B28/B32/B36 are bit-shift
  // multipliers); the displacement is split into low 12 and high 8 bits.
  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
                  (static_cast<uint64_t>(r1) & 0xF) * B36 |
                  (static_cast<uint64_t>(x2) & 0xF) * B32 |
                  (static_cast<uint64_t>(b2) & 0xF) * B28 |
                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
                  (static_cast<uint64_t>(op & 0x00FF));
  emit6bytes(code);
#endif
}
736
// Grow the assembly buffer so at least `needed` more bytes fit, moving both
// the instruction stream and the (end-anchored) relocation info.
void Assembler::GrowBuffer(int needed) {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size: double it, capping the increment at 1 MB, then
  // top up if that still does not satisfy the request.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data. Instructions move by pc_delta (front-anchored); the
  // relocation info moves by rc_delta because it grows down from the end.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}
776
db(uint8_t data)777 void Assembler::db(uint8_t data) {
778 CheckBuffer();
779 *reinterpret_cast<uint8_t*>(pc_) = data;
780 pc_ += sizeof(uint8_t);
781 }
782
dd(uint32_t data)783 void Assembler::dd(uint32_t data) {
784 CheckBuffer();
785 *reinterpret_cast<uint32_t*>(pc_) = data;
786 pc_ += sizeof(uint32_t);
787 }
788
dq(uint64_t value)789 void Assembler::dq(uint64_t value) {
790 CheckBuffer();
791 *reinterpret_cast<uint64_t*>(pc_) = value;
792 pc_ += sizeof(uint64_t);
793 }
794
dp(uintptr_t data)795 void Assembler::dp(uintptr_t data) {
796 CheckBuffer();
797 *reinterpret_cast<uintptr_t*>(pc_) = data;
798 pc_ += sizeof(uintptr_t);
799 }
800
RecordRelocInfo(RelocInfo::Mode rmode,intptr_t data)801 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
802 if (!ShouldRecordRelocInfo(rmode)) return;
803 DeferredRelocInfo rinfo(pc_offset(), rmode, data);
804 relocations_.push_back(rinfo);
805 }
806
// Emit the address of a bound label as pointer-sized data, recorded as an
// internal reference so it is rebased when the code object moves.
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  DCHECK(label->is_bound());
  // Keep internal references relative until EmitRelocations.
  dp(position);
}
815
EmitRelocations()816 void Assembler::EmitRelocations() {
817 EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
818
819 for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
820 it != relocations_.end(); it++) {
821 RelocInfo::Mode rmode = it->rmode();
822 Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
823 RelocInfo rinfo(pc, rmode, it->data(), Code());
824
825 // Fix up internal references now that they are guaranteed to be bound.
826 if (RelocInfo::IsInternalReference(rmode)) {
827 // Jump table entry
828 Address pos = Memory<Address>(pc);
829 Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
830 } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
831 // mov sequence
832 Address pos = target_address_at(pc, 0);
833 set_target_address_at(pc, 0,
834 reinterpret_cast<Address>(buffer_start_) + pos,
835 SKIP_ICACHE_FLUSH);
836 }
837
838 reloc_info_writer.Write(&rinfo);
839 }
840 }
841
// Snapshot the assembler's scratch register list so it can be restored
// when this scope is destroyed.
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : assembler_(assembler),
      old_available_(*assembler->GetScratchRegisterList()) {}
845
// Restore the scratch register list captured at construction, releasing
// any registers acquired within this scope.
UseScratchRegisterScope::~UseScratchRegisterScope() {
  *assembler_->GetScratchRegisterList() = old_available_;
}
849
Acquire()850 Register UseScratchRegisterScope::Acquire() {
851 RegList* available = assembler_->GetScratchRegisterList();
852 DCHECK_NOT_NULL(available);
853 DCHECK_NE(*available, 0);
854 int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
855 Register reg = Register::from_code(index);
856 *available &= ~reg.bit();
857 return reg;
858 }
859 } // namespace internal
860 } // namespace v8
861 #endif // V8_TARGET_ARCH_S390
862