// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/jump-table-assembler.h"

#include "src/assembler-inl.h"
#include "src/macro-assembler-inl.h"

namespace v8 {
namespace internal {
namespace wasm {

// The implementation is compact enough to implement it inline here. If it gets
// much bigger, we might want to split it in a separate file per architecture.
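// A lazy-compile slot loads the function index and jumps to the lazy-compile
// target ({EmitLazyCompileJumpSlot}); once the function is compiled, the slot
// can be patched to jump directly to its code ({EmitJumpSlot}). {NopBytes}
// pads slots to a uniform size.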
#if V8_TARGET_ARCH_X64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // TODO(clemensh): Try more efficient sequences.
  // Alternative 1:
  // [header]:  mov r10, [lazy_compile_target]
  //            jmp r10
  // [slot 0]:  push [0]
  //            jmp [header]  // pc-relative --> slot size: 10 bytes
  //
  // Alternative 2:
  // [header]:  lea r10, [rip - [header]]
  //            shr r10, 3  // compute index from offset
  //            push r10
  //            mov r10, [lazy_compile_target]
  //            jmp r10
  // [slot 0]:  call [header]
  //            ret   // -> slot size: 5 bytes

  // Use a push, because mov to an extended register takes 6 bytes.
  pushq(Immediate(func_index));                           // max 5 bytes
  movq(kScratchRegister, uint64_t{lazy_compile_target});  // max 10 bytes
  jmp(kScratchRegister);                                  // 3 bytes
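  // Total: max. 18 bytes for this sequence.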

  PatchConstPool();  // force patching entries for partial const pool
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  movq(kScratchRegister, static_cast<uint64_t>(target));
  jmp(kScratchRegister);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}

#elif V8_TARGET_ARCH_IA32
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  mov(edi, func_index);                       // 5 bytes
  jmp(lazy_compile_target, RelocInfo::NONE);  // 5 bytes
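  // Total: 10 bytes.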
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  jmp(target, RelocInfo::NONE);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  Nop(bytes);
}

#elif V8_TARGET_ARCH_ARM
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to r4.
  // This generates [movw, movt] on ARMv7 and later, [ldr, constant pool marker,
  // constant] on ARMv6.
  Move32BitImmediate(r4, Operand(func_index));
  // EmitJumpSlot emits either [b], [movw, movt, mov] (ARMv7+), or
  // [ldr, constant].
  // In total, this is <=5 instructions on all ARM variants.
  // TODO(arm): Optimize this for code size; lazy compile is not performance
  // critical, as it's only executed once per function.
  EmitJumpSlot(lazy_compile_target);
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
  // mode used below; we need this to allow concurrent patching of this slot.
  Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
  CheckConstPool(true, false);  // force emit of const pool
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}

#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  Mov(w8, func_index);                         // max. 2 instr
  Jump(lazy_compile_target, RelocInfo::NONE);  // 1 instr
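  // Total: max. 3 instructions.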
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
  // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
  // sure concurrent patching is still supported.
  Jump(target, RelocInfo::NONE);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}

#elif V8_TARGET_ARCH_S390
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to r7. 6 bytes
  lgfi(r7, Operand(func_index));
  // Jump to {lazy_compile_target}. 6 bytes or 12 bytes
  mov(r1, Operand(lazy_compile_target));
  b(r1);  // 2 bytes
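  // Total: 14 or 20 bytes.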
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r1, Operand(target));
  b(r1);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % 2);
  for (; bytes > 0; bytes -= 2) {
    nop(0);
  }
}

#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  li(t0, func_index);  // max. 2 instr
  // Jump produces max. 4 instructions on 32-bit platforms
  // and max. 6 instructions on 64-bit platforms.
  Jump(lazy_compile_target, RelocInfo::NONE);
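  // Total: max. 6 instructions (32-bit) or 8 instructions (64-bit).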
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  Jump(target, RelocInfo::NONE);
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % kInstrSize);
  for (; bytes > 0; bytes -= kInstrSize) {
    nop();
  }
}

#elif V8_TARGET_ARCH_PPC
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  // Load function index to r15. max 5 instrs
  mov(r15, Operand(func_index));
  // Jump to {lazy_compile_target}. max 5 instrs
  mov(r0, Operand(lazy_compile_target));
  mtctr(r0);
  bctr();
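  // Total: max. 12 instructions.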
}

void JumpTableAssembler::EmitJumpSlot(Address target) {
  mov(r0, Operand(target));
  mtctr(r0);
  bctr();
}

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  DCHECK_EQ(0, bytes % 4);
  for (; bytes > 0; bytes -= 4) {
    nop(0);
  }
}

#else
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
                                                 Address lazy_compile_target) {
  UNIMPLEMENTED();
}

void JumpTableAssembler::EmitJumpSlot(Address target) { UNIMPLEMENTED(); }

void JumpTableAssembler::NopBytes(int bytes) {
  DCHECK_LE(0, bytes);
  UNIMPLEMENTED();
}
#endif

}  // namespace wasm
}  // namespace internal
}  // namespace v8