// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/execution/clobber-registers.h"

#include "src/base/build_config.h"

// Check both {HOST_ARCH} and {TARGET_ARCH} to disable the functionality of
// this file for cross-compilation. The reason is that the inline assembly
// code below does not work for cross-compilation.
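// When host and target differ, none of the branches below match, no register
// header is included, and the arithmetic fallback at the end of this file is
// used instead.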
#if V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
#include "src/codegen/arm/register-arm.h"
#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
#include "src/codegen/arm64/register-arm64.h"
#elif V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
#elif V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64
#include "src/codegen/x64/register-x64.h"
#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/register-loong64.h"
#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/register-mips.h"
#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
#endif

namespace v8 {
namespace internal {

#if V8_CC_MSVC
// MSVC only supports inline assembly on x86.
#if V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
#define CLOBBER_REGISTER(R) __asm xorps R, R
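// For example, CLOBBER_REGISTER(xmm0) expands to `__asm xorps xmm0, xmm0`,
// which XORs the XMM register with itself to zero it.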

#endif

#else  // !V8_CC_MSVC

#if (V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64) || \
    (V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32)
#define CLOBBER_REGISTER(R) \
  __asm__ volatile(         \
      "xorps "              \
      "%%" #R               \
      ","                   \
      "%%" #R ::            \
          :);
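// For example, CLOBBER_REGISTER(xmm0) pastes together the extended-asm
// statement `__asm__ volatile("xorps %%xmm0,%%xmm0" :::);`, zeroing the
// register by XORing it with itself.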

#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
#define CLOBBER_REGISTER(R) __asm__ volatile("fmov " #R ",xzr" :::);
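// For example, CLOBBER_REGISTER(d0) emits `fmov d0,xzr`, copying the 64-bit
// zero register into the double register.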

#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
#define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);
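// movgr2fr.d copies a general-purpose register into a floating-point
// register, so moving $zero clears the target double register.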

#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
#define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);

#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
#define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);
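// mtc1/dmtc1 move a word/doubleword from a general-purpose register into an
// FPU register, so moving $zero clears the target double register.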

#endif  // V8_HOST_ARCH_XXX && V8_TARGET_ARCH_XXX

#endif  // V8_CC_MSVC

double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
  // clobber all double registers

#if defined(CLOBBER_REGISTER)
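  // DOUBLE_REGISTERS is an X-macro list provided by the register-<arch>.h
  // header included above; it invokes CLOBBER_REGISTER once per double
  // register name (e.g. xmm0..xmm15 on x64), emitting one zeroing instruction
  // per register. The MIPS ports use the analogous DOUBLE_USE_REGISTERS list.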
  DOUBLE_REGISTERS(CLOBBER_REGISTER)
#undef CLOBBER_REGISTER
  return 0;

#elif defined(CLOBBER_USE_REGISTER)
  DOUBLE_USE_REGISTERS(CLOBBER_USE_REGISTER)
#undef CLOBBER_USE_REGISTER
  return 0;

#else
  // TODO(v8:11798): This clobbers only a subset of registers, depending on
  // the compiler. Rewrite this in assembly to really clobber all registers.
  // GCC for ia32 uses the FPU and does not touch XMM registers.
  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
#endif  // CLOBBER_REGISTER
}

}  // namespace internal
}  // namespace v8