• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36 
37 #include "src/codegen/s390/assembler-s390.h"
38 #include <set>
39 #include <string>
40 
41 #if V8_TARGET_ARCH_S390
42 
43 #if V8_HOST_ARCH_S390
44 #include <elf.h>  // Required for auxv checks for STFLE support
45 #include <sys/auxv.h>
46 #endif
47 
48 #include "src/base/bits.h"
49 #include "src/base/cpu.h"
50 #include "src/codegen/macro-assembler.h"
51 #include "src/codegen/s390/assembler-s390-inl.h"
52 #include "src/codegen/string-constants.h"
53 #include "src/deoptimizer/deoptimizer.h"
54 
55 namespace v8 {
56 namespace internal {
57 
// Get the CPU features enabled by the build.
// No s390 features are implied by compiler flags at present, so the
// implied-feature bitmask is always empty.
static unsigned CpuFeaturesImpliedByCompiler() { return 0u; }
63 
// Returns true if the host reports the named hardware capability
// (e.g. "vx").  The feature set is computed once on first call and cached
// in a deliberately leaked function-local set (no destructor at exit).
// NOTE(review): the lazy population of `features` is not synchronized;
// presumably the first call happens before any concurrent use -- confirm.
static bool supportsCPUFeature(const char* feature) {
  static std::set<std::string>& features = *new std::set<std::string>();
  static std::set<std::string>& all_available_features =
      *new std::set<std::string>({"iesan3", "zarch", "stfle", "msa", "ldisp",
                                  "eimm", "dfp", "etf3eh", "highgprs", "te",
                                  "vx"});
  if (features.empty()) {
#if V8_HOST_ARCH_S390

// Fallback for older kernel headers that do not define the vector bit.
#ifndef HWCAP_S390_VX
#define HWCAP_S390_VX 2048
#endif
#define CHECK_AVAILABILITY_FOR(mask, value) \
  if (f & mask) features.insert(value);

    // initialize feature vector from the kernel's hardware-capability word
    uint64_t f = getauxval(AT_HWCAP);
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ESAN3, "iesan3")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ZARCH, "zarch")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_STFLE, "stfle")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_MSA, "msa")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_LDISP, "ldisp")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_EIMM, "eimm")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_DFP, "dfp")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_ETF3EH, "etf3eh")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_HIGH_GPRS, "highgprs")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_TE, "te")
    CHECK_AVAILABILITY_FOR(HWCAP_S390_VX, "vx")
#else
    // Non-s390 host (e.g. simulator build): import all features.
    features.insert(all_available_features.begin(),
                    all_available_features.end());
#endif
  }
  USE(all_available_features);
  return features.find(feature) != features.end();
}
101 
102 #undef CHECK_AVAILABILITY_FOR
103 #undef HWCAP_S390_VX
104 
// Check whether the Store Facility List Extended (STFLE) instruction is
// available on the platform, by scanning the AUXV (/proc/self/auxv) for an
// AT_HWCAP entry with the STFLE capability bit set.
// Returns false on non-s390 hosts, and on s390 when the AUXV cannot be
// read or carries no HWCAP entry.
static bool supportsSTFLE() {
#if V8_HOST_ARCH_S390
  static bool read_tried = false;
  static uint32_t auxv_hwcap = 0;

  if (!read_tried) {
    // Open the AUXV (auxiliary vector) pseudo-file
    int fd = open("/proc/self/auxv", O_RDONLY);

    read_tried = true;
    if (fd != -1) {
#if V8_TARGET_ARCH_S390X
      static Elf64_auxv_t buffer[16];
      Elf64_auxv_t* auxv_element;
#else
      static Elf32_auxv_t buffer[16];
      Elf32_auxv_t* auxv_element;
#endif
      for (;;) {
        // Read a chunk of the AUXV; stop on EOF (0) or error (-1).  The
        // previous `while (bytes_read >= 0)` loop never terminated at EOF
        // when no AT_HWCAP entry was present.
        int bytes_read = read(fd, buffer, sizeof(buffer));
        if (bytes_read <= 0) break;
        // Number of complete auxv entries actually read.  The previous
        // bound (`auxv_element + sizeof(auxv_element) <= buffer + bytes_read`)
        // mixed a byte count with element arithmetic -- and took sizeof a
        // pointer -- so the scan could run past the data that was read.
        int elements_read = bytes_read / static_cast<int>(sizeof(buffer[0]));
        // Locate and read the HWCAP field of AUXV if it is in the chunk
        for (auxv_element = buffer;
             auxv_element < buffer + elements_read &&
             auxv_element->a_type != AT_NULL;
             auxv_element++) {
          // We are looking for HWCAP entry in AUXV to search for STFLE support
          if (auxv_element->a_type == AT_HWCAP) {
            /* Note: Both auxv_hwcap and buffer are static */
            auxv_hwcap = auxv_element->a_un.a_val;
            goto done_reading;
          }
        }
      }
    done_reading:
      close(fd);
    }
  }

  // Did not find result
  if (0 == auxv_hwcap) {
    return false;
  }

  // HWCAP_S390_STFLE is defined to be 4 in include/asm/elf.h.  Currently
  // hardcoded in case that include file does not exist.
  const uint32_t _HWCAP_S390_STFLE = 4;
  return (auxv_hwcap & _HWCAP_S390_STFLE);
#else
  // STFLE is not available on non-s390 hosts
  return false;
#endif
}
161 
// Wasm SIMD128 is available iff the vector-enhancement facility 1 was
// detected at probe time; always false when WebAssembly is compiled out.
bool CpuFeatures::SupportsWasmSimd128() {
#if V8_ENABLE_WEBASSEMBLY
  return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
#else
  return false;
#endif  // V8_ENABLE_WEBASSEMBLY
}
169 
// Probe the host CPU (via STFLE and AUXV) and populate supported_ with the
// feature bits the code generator may rely on.  On non-s390 hosts
// (simulator builds) all simulatable features are enabled statically.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 256;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifdef DEBUG
  initialized_ = true;
#endif

  static bool performSTFLE = supportsSTFLE();

// Need to define host, as we are generating inlined S390 assembly to test
// for facilities.
#if V8_HOST_ARCH_S390
  if (performSTFLE) {
    // STFLE D(B) requires:
    //    GPR0 to specify # of double words to update minus 1.
    //      i.e. GPR0 = 2 requests 3 doublewords (facilities[0..2]),
    //      covering facility bits 0..191 -- needed because the vector
    //      facility bits (129+) tested below live in facilities[2].
    //    D(B) to specify the memory location to store the facilities bits.
    int64_t facilities[3] = {0L};
    int16_t reg0;
    // LHI sets up GPR0
    // STFLE is specified as .insn, as opcode is not recognized.
    // We register the instructions kill r0 (LHI) and the CC (STFLE).
    asm volatile(
        "lhi   %%r0,2\n"
        ".insn s,0xb2b00000,%0\n"
        : "=Q"(facilities), "=r"(reg0)
        :
        : "cc", "r0");

    uint64_t one = static_cast<uint64_t>(1);
    // Facility bit N is bit (63 - (N mod 64)) of doubleword N / 64.
    // Test for Distinct Operands Facility - Bit 45
    if (facilities[0] & (one << (63 - 45))) {
      supported_ |= (1u << DISTINCT_OPS);
    }
    // Test for General Instruction Extension Facility - Bit 34
    if (facilities[0] & (one << (63 - 34))) {
      supported_ |= (1u << GENERAL_INSTR_EXT);
    }
    // Test for Floating Point Extension Facility - Bit 37
    if (facilities[0] & (one << (63 - 37))) {
      supported_ |= (1u << FLOATING_POINT_EXT);
    }
    // Test for Vector Facility - Bit 129 (also requires the kernel to
    // report "vx", i.e. the OS saves/restores vector registers)
    if (facilities[2] & (one << (63 - (129 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_FACILITY);
    }
    // Test for Vector Enhancement Facility 1 - Bit 135
    if (facilities[2] & (one << (63 - (135 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
    }
    // Test for Vector Enhancement Facility 2 - Bit 148
    if (facilities[2] & (one << (63 - (148 - 128))) &&
        supportsCPUFeature("vx")) {
      supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
    }
    // Test for Miscellaneous Instruction Extension Facility - Bit 58
    if (facilities[0] & (1lu << (63 - 58))) {
      supported_ |= (1u << MISC_INSTR_EXT2);
    }
  }
#else
  // All distinct ops instructions can be simulated
  supported_ |= (1u << DISTINCT_OPS);
  // RISBG can be simulated
  supported_ |= (1u << GENERAL_INSTR_EXT);
  supported_ |= (1u << FLOATING_POINT_EXT);
  supported_ |= (1u << MISC_INSTR_EXT2);
  USE(performSTFLE);  // To avoid assert
  USE(supportsCPUFeature);
  supported_ |= (1u << VECTOR_FACILITY);
  supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
  supported_ |= (1u << VECTOR_ENHANCE_FACILITY_2);
#endif
  // FPU support is unconditional on s390.
  supported_ |= (1u << FPU);

  // Set a static value on whether Simd is supported.
  // This variable is only used for certain archs to query SupportWasmSimd128()
  // at runtime in builtins using an extern ref. Other callers should use
  // CpuFeatures::SupportWasmSimd128().
  CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
260 
PrintTarget()261 void CpuFeatures::PrintTarget() {
262   const char* s390_arch = nullptr;
263 
264 #if V8_TARGET_ARCH_S390X
265   s390_arch = "s390x";
266 #else
267   s390_arch = "s390";
268 #endif
269 
270   PrintF("target %s\n", s390_arch);
271 }
272 
PrintFeatures()273 void CpuFeatures::PrintFeatures() {
274   PrintF("FPU=%d\n", CpuFeatures::IsSupported(FPU));
275   PrintF("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
276   PrintF("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
277   PrintF("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
278   PrintF("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
279   PrintF("VECTOR_ENHANCE_FACILITY_1=%d\n",
280          CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
281   PrintF("VECTOR_ENHANCE_FACILITY_2=%d\n",
282          CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2));
283   PrintF("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
284 }
285 
// Map a numeric register code (0..15) to its Register object.  Positions
// 11, 12 and 15 use the aliases fp, ip and sp respectively.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0, r1, r2,  r3, r4, r5,  r6,  r7,
                                 r8, r9, r10, fp, ip, r13, r14, sp};
  return kRegisters[num];
}
292 
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// Relocation modes that need fixing up when generated code moves.
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
299 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on S390 means that it is an iihf/iilf
  // instruction sequence, and that is always the case inside code
  // objects.
  return true;
}
307 
// s390 emits no constant pool, so no target ever lives in one.
bool RelocInfo::IsInConstantPool() { return false; }
309 
// Returns the wasm call tag, which is stored as the (truncated) target
// address of the call instruction at pc_.
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
315 
316 // -----------------------------------------------------------------------------
317 // Implementation of Operand and MemOperand
318 // See assembler-s390-inl.h for inlined constructors
319 
// Immediate Operand holding the raw address of a heap object, tagged for
// relocation as a full embedded object pointer.
Operand::Operand(Handle<HeapObject> handle) {
  AllowHandleDereference using_location;
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
326 
// Build an Operand for a numeric literal: a Smi immediate when the double
// fits Smi range, otherwise a deferred heap-number request that is
// allocated and patched later (see AllocateAndInstallRequestedHeapObjects).
Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}
335 
// Build an Operand carrying a deferred string-constant request, allocated
// and patched later (see AllocateAndInstallRequestedHeapObjects).
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}
342 
// D(B) form: base register plus displacement; r0 as index means
// "no index register" in s390 addressing.
MemOperand::MemOperand(Register rn, int32_t offset)
    : baseRegister(rn), indexRegister(r0), offset_(offset) {}
345 
// D(X,B) form: index register + base register + displacement.
MemOperand::MemOperand(Register rx, Register rb, int32_t offset)
    : baseRegister(rb), indexRegister(rx), offset_(offset) {}
348 
// Materialize every deferred heap object (heap numbers / string constants)
// requested while assembling, and patch each requesting instruction to
// reference the freshly allocated object.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    // Absolute address of the instruction to patch.
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        // NOTE(review): icache flush skipped here -- presumably the buffer
        // is not yet executable at this point; confirm.
        set_target_address_at(pc, kNullAddress, object.address(),
                              SKIP_ICACHE_FLUSH);
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        set_target_address_at(pc, kNullAddress,
                              str->AllocateStringConstant(isolate).address());
        break;
      }
    }
  }
}
372 
373 // -----------------------------------------------------------------------------
374 // Specific instructions, constants, and masks.
375 
// Construct an assembler writing into `buffer`.  Code grows upward from
// the buffer start while relocation info grows downward from its end.
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)), scratch_register_list_({ip}) {
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
  last_bound_pos_ = 0;
  relocations_.reserve(128);  // avoid early reallocations of the queue
}
383 
// Finalize the buffer and fill in `desc` describing the generated code
// layout: instructions, then safepoint/handler/constant-pool/code-comment
// sections (offsets computed back-to-front), with reloc info at the end.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to the
  // (larger) kCodeAlignment anyways.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  // Flush the relocation entries queued by RecordRelocInfo.
  EmitRelocations();

  int code_comments_size = WriteCodeComments();

  // Patch in any deferred heap numbers / string constants.
  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  static constexpr int kConstantPoolSize = 0;  // no constant pool on s390
  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->safepoint_table_offset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}
423 
// Pad with nops (lr r0,r0 -- 2 bytes each) until pc_offset() is aligned
// to m, which must be a power of two >= 4.
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop(0);
  }
}
430 
// Code targets are aligned to 8 bytes on s390.
void Assembler::CodeTargetAlign() { Align(8); }
432 
// Extract the condition from a branch instruction.  Only the two
// conditions the code generator emits here (branch-true / branch-false)
// are handled; anything else is a bug.
Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
}
443 
#if V8_TARGET_ARCH_S390X
// This code assumes a FIXED_SEQUENCE for 64bit loads (iihf/iilf)
bool Assembler::Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2) {
  // Check the instructions are the iihf/iilf load into ip: the top 16 bits
  // encode opcode 0xC0, register field 0xC (ip == r12) and the iihf (8) /
  // iilf (9) extension nibble.
  return (((instr1 >> 32) == 0xC0C8) && ((instr2 >> 32) == 0xC0C9));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (iilf)
bool Assembler::Is32BitLoadIntoIP(SixByteInstr instr) {
  // Check the instruction is an iilf load into ip/r12.
  return ((instr >> 32) == 0xC0C9);
}
#endif
457 
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a negative code position (must be aligned);
// -4 keeps the sentinel itself 4-byte aligned.
const int kEndOfChain = -4;
470 
471 // Returns the target address of the relative instructions, typically
472 // of the form: pos + imm (where immediate is in # of halfwords for
473 // BR* and LARL).
// Decode the branch/load at `pos` and return the code position it targets,
// or kEndOfChain when the encoded immediate is 0 (i.e. the instruction is
// the last entry of a label's link chain).
int Assembler::target_at(int pos) {
  SixByteInstr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);

  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
    // 16-bit relative offset in the low halfword.
    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 <<= 1;  // immediate is in # of halfwords
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
             BRASL == opcode || LGRL == opcode) {
    // 32-bit immediate in the low word of the 6-byte instruction.
    int32_t imm32 =
        static_cast<int32_t>(instr & (static_cast<uint64_t>(0xFFFFFFFF)));
    if (LLILF != opcode)
      imm32 <<= 1;  // BR* + LARL treat immediate in # of halfwords
    if (imm32 == 0) return kEndOfChain;
    return pos + imm32;
  } else if (BRXHG == opcode) {
    // offset is in bits 16-31 of 48 bit instruction
    instr = instr >> 16;
    int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 <<= 1;  // immediate is in # of halfwords
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  }

  // Unknown condition
  DCHECK(false);
  return -1;
}
505 
506 // Update the target address of the current relative instruction.
// Update the target address of the relative instruction at `pos` so it
// refers to `target_pos`.  If `is_branch` is non-null it is set to whether
// the patched instruction was an actual branch (as opposed to an
// address/constant load).
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  SixByteInstr instr = instr_at(pos);
  Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);

  if (is_branch != nullptr) {
    *is_branch =
        (opcode == BRC || opcode == BRCT || opcode == BRCTG || opcode == BRCL ||
         opcode == BRASL || opcode == BRXH || opcode == BRXHG);
  }

  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
    int16_t imm16 = target_pos - pos;
    instr &= (~0xFFFF);
    DCHECK(is_int16(imm16));
    instr_at_put<FourByteInstr>(pos, instr | (imm16 >> 1));
    return;
  } else if (BRCL == opcode || LARL == opcode || BRASL == opcode ||
             LGRL == opcode) {
    // Immediate is in # of halfwords
    int32_t imm32 = target_pos - pos;
    instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
    instr_at_put<SixByteInstr>(pos, instr | (imm32 >> 1));
    return;
  } else if (LLILF == opcode) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code pointer of generated Code object.
    int32_t imm32 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    instr &= (~static_cast<uint64_t>(0xFFFFFFFF));
    instr_at_put<SixByteInstr>(pos, instr | imm32);
    return;
  } else if (BRXHG == opcode) {
    // Immediate is in bits 16-31 of 48 bit instruction
    int32_t imm16 = target_pos - pos;
    instr &= (0xFFFF0000FFFF);  // clear bits 16-31
    imm16 &= 0xFFFF;            // clear high halfword
    imm16 <<= 16;
    // Immediate is in # of halfwords
    instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
    return;
  }
  DCHECK(false);
}
550 
551 // Returns the maximum number of bits given instruction can address.
// Returns the maximum number of bits of (signed) offset the instruction at
// `pos` can address.
int Assembler::max_reach_from(int pos) {
  Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
  // Check which type of instr.  In theory, we can return
  // the values below + 1, given offset is # of halfwords
  if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode ||
      BRXHG == opcode) {
    return 16;
  } else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
             BRASL == opcode || LGRL == opcode) {
    return 31;  // Using 31 as workaround instead of 32 as
                // is_intn(x,32) doesn't work on 32-bit platforms.
                // llilf: Emitted label constant, not part of
                //        a branch (regexp PushBacktrack).
  }
  DCHECK(false);
  return 16;
}
569 
// Bind label L to code position `pos`, patching every instruction on its
// link chain to point at pos.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
#ifdef DEBUG
    // Only consumed by the DCHECK below; not computed in release builds.
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
#endif
    next(L);  // call next before overwriting link with target at fixup_pos
    DCHECK(is_intn(offset, maxReach));
    target_at_put(fixup_pos, pos, &is_branch);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
589 
// Bind L to the current emission position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
594 
// Advance L to the next entry of its link chain, or mark it unused when
// the chain terminates.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}
605 
// Returns the position the reference being emitted should encode: the
// label's position when already bound, the previous link when linked, or
// the current pc for a first reference (self-link; see target_at for the
// conversion back to kEndOfChain).  Unbound labels are linked to the
// current pc.
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
625 
// Emit an llilf loading r1 with L's offset relative to the Code object
// start.  For an unbound label the emitted constant participates in the
// link chain and is patched when the label is bound (LLILF case in
// target_at_put).
void Assembler::load_label_offset(Register r1, Label* L) {
  int target_pos;
  int constant;
  if (L->is_bound()) {
    target_pos = L->pos();
    constant = target_pos + (Code::kHeaderSize - kHeapObjectTag);
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using branch to self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());

    constant = target_pos - pc_offset();
  }
  llilf(r1, Operand(constant));
}
648 
649 // Pseudo op - branch on condition
branchOnCond(Condition c,int branch_offset,bool is_bound,bool force_long_branch)650 void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound,
651                              bool force_long_branch) {
652   int offset_in_halfwords = branch_offset / 2;
653   if (is_bound && is_int16(offset_in_halfwords) && !force_long_branch) {
654     brc(c, Operand(offset_in_halfwords));  // short jump
655   } else {
656     brcl(c, Operand(offset_in_halfwords));  // long jump
657   }
658 }
659 
660 // Exception-generating instructions and debugging support.
661 // Stops with a non-negative code less than kNumOfWatchedStops support
662 // enabling/disabling and a counter feature. See simulator-s390.h .
// Emit a breakpoint, optionally guarded by `cond` (skipped when the
// condition does not hold).  `code` and `cr` exist for signature
// compatibility with other architectures and are ignored here.
void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, Label::kNear);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}
673 
// Emit a GDB software breakpoint.  `imm16` is ignored: the fixed 2-byte
// 0x0001 encoding carries no immediate.
void Assembler::bkpt(uint32_t imm16) {
  // GDB software breakpoint instruction
  emit2bytes(0x0001);
}
678 
679 // Pseudo instructions.
// Emit a no-op of the requested flavor: type 0 is a plain 2-byte
// lr r0,r0; DEBUG_BREAK_NOP is a recognizable oill r3,0 marker
// (see IsNop for the matching decoders).
void Assembler::nop(int type) {
  switch (type) {
    case 0:
      lr(r0, r0);
      break;
    case DEBUG_BREAK_NOP:
      // TODO(john.yan): Use a better NOP break
      oill(r3, Operand::Zero());
      break;
    default:
      UNIMPLEMENTED();
  }
}
693 
694 // -------------------------
695 // Load Address Instructions
696 // -------------------------
697 // Load Address Relative Long
// Load Address Relative Long against a label; delegates to the
// immediate-operand form using the label's branch offset.
void Assembler::larl(Register r1, Label* l) {
  larl(r1, Operand(branch_offset(l)));
}
701 
// Load (64-bit) Relative Long against a label; delegates to the
// immediate-operand form using the label's branch offset.
void Assembler::lgrl(Register r1, Label* l) {
  lgrl(r1, Operand(branch_offset(l)));
}
705 
EnsureSpaceFor(int space_needed)706 void Assembler::EnsureSpaceFor(int space_needed) {
707   if (buffer_space() <= (kGap + space_needed)) {
708     GrowBuffer(space_needed);
709   }
710 }
711 
// Emit a relocated call (brasl via r14) to a Code target.  The immediate
// emitted is the index into the code-target list; the real address is
// resolved through the recorded relocation.
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  EnsureSpace ensure_space(this);

  RecordRelocInfo(rmode);
  int32_t target_index = AddCodeTarget(target);
  brasl(r14, Operand(target_index));
}
720 
// Emit a relocated conditional jump (brcl) to a Code target; like call(),
// the emitted immediate is a code-target index fixed up via relocation.
void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
                     Condition cond) {
  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
  EnsureSpace ensure_space(this);

  RecordRelocInfo(rmode);
  int32_t target_index = AddCodeTarget(target);
  brcl(cond, Operand(target_index));
}
730 
731 // end of S390instructions
732 
// Returns true when `instr` is the nop encoding of the given flavor
// (matching what nop(type) emits).
bool Assembler::IsNop(SixByteInstr instr, int type) {
  DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
  if (DEBUG_BREAK_NOP == type) {
    return ((instr & 0xFFFFFFFF) == 0xA53B0000);  // oill r3, 0
  }
  return ((instr & 0xFFFF) == 0x1800);  // lr r0,r0
}
740 
741 // dummy instruction reserved for special use.
// dummy instruction reserved for special use.
// Emits a hand-assembled 6-byte 0xE3..53 encoding, but only in simulator
// builds; on real hardware builds this function emits nothing.
void Assembler::dumy(int r1, int x2, int b2, int d2) {
#if defined(USE_SIMULATOR)
  int op = 0xE353;
  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
                  (static_cast<uint64_t>(r1) & 0xF) * B36 |
                  (static_cast<uint64_t>(x2) & 0xF) * B32 |
                  (static_cast<uint64_t>(b2) & 0xF) * B28 |
                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
                  (static_cast<uint64_t>(op & 0x00FF));
  emit6bytes(code);
#endif
}
755 
// Grow the code buffer so at least `needed` more bytes fit, copying both
// the emitted code (from the start) and the reloc info (from the end) into
// the new buffer and rebasing all buffer-relative pointers.
void Assembler::GrowBuffer(int needed) {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size: double, capped at +1MB growth, then bumped
  // further if that still doesn't cover `needed`.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data.  Code is anchored at the buffer start; reloc info is
  // anchored at the buffer end, hence the separate deltas.
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}
795 
db(uint8_t data)796 void Assembler::db(uint8_t data) {
797   CheckBuffer();
798   *reinterpret_cast<uint8_t*>(pc_) = data;
799   pc_ += sizeof(uint8_t);
800 }
801 
dd(uint32_t data,RelocInfo::Mode rmode)802 void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
803   CheckBuffer();
804   if (!RelocInfo::IsNoInfo(rmode)) {
805     DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
806            RelocInfo::IsLiteralConstant(rmode));
807     RecordRelocInfo(rmode);
808   }
809   *reinterpret_cast<uint32_t*>(pc_) = data;
810   pc_ += sizeof(uint32_t);
811 }
812 
dq(uint64_t value,RelocInfo::Mode rmode)813 void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
814   CheckBuffer();
815   if (!RelocInfo::IsNoInfo(rmode)) {
816     DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
817            RelocInfo::IsLiteralConstant(rmode));
818     RecordRelocInfo(rmode);
819   }
820   *reinterpret_cast<uint64_t*>(pc_) = value;
821   pc_ += sizeof(uint64_t);
822 }
823 
dp(uintptr_t data,RelocInfo::Mode rmode)824 void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
825   CheckBuffer();
826   if (!RelocInfo::IsNoInfo(rmode)) {
827     DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
828            RelocInfo::IsLiteralConstant(rmode));
829     RecordRelocInfo(rmode);
830   }
831   *reinterpret_cast<uintptr_t*>(pc_) = data;
832   pc_ += sizeof(uintptr_t);
833 }
834 
RecordRelocInfo(RelocInfo::Mode rmode,intptr_t data)835 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
836   if (!ShouldRecordRelocInfo(rmode)) return;
837   DeferredRelocInfo rinfo(pc_offset(), rmode, data);
838   relocations_.push_back(rinfo);
839 }
840 
emit_label_addr(Label * label)841 void Assembler::emit_label_addr(Label* label) {
842   CheckBuffer();
843   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
844   int position = link(label);
845   DCHECK(label->is_bound());
846   // Keep internal references relative until EmitRelocations.
847   dp(position);
848 }
849 
EmitRelocations()850 void Assembler::EmitRelocations() {
851   EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
852 
853   for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
854        it != relocations_.end(); it++) {
855     RelocInfo::Mode rmode = it->rmode();
856     Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
857     RelocInfo rinfo(pc, rmode, it->data(), Code());
858 
859     // Fix up internal references now that they are guaranteed to be bound.
860     if (RelocInfo::IsInternalReference(rmode)) {
861       // Jump table entry
862       Address pos = Memory<Address>(pc);
863       Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
864     } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
865       // mov sequence
866       Address pos = target_address_at(pc, 0);
867       set_target_address_at(pc, 0,
868                             reinterpret_cast<Address>(buffer_start_) + pos,
869                             SKIP_ICACHE_FLUSH);
870     }
871 
872     reloc_info_writer.Write(&rinfo);
873   }
874 }
875 
// Snapshots the assembler's scratch register list so the destructor can
// restore it, making scratch acquisitions scoped (RAII).
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : assembler_(assembler),
      old_available_(*assembler->GetScratchRegisterList()) {}
879 
~UseScratchRegisterScope()880 UseScratchRegisterScope::~UseScratchRegisterScope() {
881   *assembler_->GetScratchRegisterList() = old_available_;
882 }
883 
Acquire()884 Register UseScratchRegisterScope::Acquire() {
885   RegList* available = assembler_->GetScratchRegisterList();
886   DCHECK_NOT_NULL(available);
887   return available->PopFirst();
888 }
889 }  // namespace internal
890 }  // namespace v8
891 #endif  // V8_TARGET_ARCH_S390
892