1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <stdlib.h>
6 #include <cmath>
7 #include <cstdarg>
8 #include <type_traits>
9
10 #if V8_TARGET_ARCH_ARM64
11
12 #include "src/arm64/decoder-arm64-inl.h"
13 #include "src/arm64/simulator-arm64.h"
14 #include "src/assembler-inl.h"
15 #include "src/codegen.h"
16 #include "src/disasm.h"
17 #include "src/macro-assembler.h"
18 #include "src/objects-inl.h"
19 #include "src/ostreams.h"
20 #include "src/runtime/runtime-utils.h"
21
22 namespace v8 {
23 namespace internal {
24
25 #if defined(USE_SIMULATOR)
26
27
28 // This macro provides a platform independent use of sscanf. The reason for
29 // SScanF not being implemented in a platform independent way through
30 // ::v8::internal::OS in the same way as SNPrintF is that the
31 // Windows C Run-Time Library does not provide vsscanf.
32 #define SScanF sscanf // NOLINT
33
34
35 // Helpers for colors.
36 #define COLOUR(colour_code) "\033[0;" colour_code "m"
37 #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
38 #define NORMAL ""
39 #define GREY "30"
40 #define RED "31"
41 #define GREEN "32"
42 #define YELLOW "33"
43 #define BLUE "34"
44 #define MAGENTA "35"
45 #define CYAN "36"
46 #define WHITE "37"
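// For example, COLOUR(RED) expands to the ANSI escape sequence "\033[0;31m",
// and COLOUR_BOLD(RED) to "\033[1;31m".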
47
48 typedef char const * const TEXT_COLOUR;
49 TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
50 TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
51 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
52 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
53 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
54 TEXT_COLOUR clr_vreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
55 TEXT_COLOUR clr_vreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
56 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
57 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
58 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
59 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
60
61 // static
62 base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
63 LAZY_INSTANCE_INITIALIZER;
64
65 // This is basically the same as PrintF, with a guard for FLAG_trace_sim.
66 void Simulator::TraceSim(const char* format, ...) {
67 if (FLAG_trace_sim) {
68 va_list arguments;
69 va_start(arguments, format);
70 base::OS::VFPrint(stream_, format, arguments);
71 va_end(arguments);
72 }
73 }
74
75 const Instruction* Simulator::kEndOfSimAddress = nullptr;
76
77 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
78 int width = msb - lsb + 1;
79 DCHECK(is_uintn(bits, width) || is_intn(bits, width));
80
81 bits <<= lsb;
82 uint32_t mask = ((1 << width) - 1) << lsb;
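// For example, msb = 31 and lsb = 28 give width = 4 and mask = 0xF0000000.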
83 DCHECK_EQ(mask & write_ignore_mask_, 0);
84
85 value_ = (value_ & ~mask) | (bits & mask);
86 }
87
88
89 SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
90 switch (id) {
91 case NZCV:
92 return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
93 case FPCR:
94 return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
95 default:
96 UNREACHABLE();
97 }
98 }
99
100
101 // Get the active Simulator for the current thread.
102 Simulator* Simulator::current(Isolate* isolate) {
103 Isolate::PerIsolateThreadData* isolate_data =
104 isolate->FindOrAllocatePerThreadDataForThisThread();
105 DCHECK_NOT_NULL(isolate_data);
106
107 Simulator* sim = isolate_data->simulator();
108 if (sim == nullptr) {
109 if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
110 sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
111 } else {
112 sim = new Decoder<Simulator>();
113 sim->isolate_ = isolate;
114 }
115 isolate_data->set_simulator(sim);
116 }
117 return sim;
118 }
119
120 void Simulator::CallImpl(Address entry, CallArgument* args) {
121 int index_x = 0;
122 int index_d = 0;
123
124 std::vector<int64_t> stack_args(0);
125 for (int i = 0; !args[i].IsEnd(); i++) {
126 CallArgument arg = args[i];
127 if (arg.IsX() && (index_x < 8)) {
128 set_xreg(index_x++, arg.bits());
129 } else if (arg.IsD() && (index_d < 8)) {
130 set_dreg_bits(index_d++, arg.bits());
131 } else {
132 DCHECK(arg.IsD() || arg.IsX());
133 stack_args.push_back(arg.bits());
134 }
135 }
136
137 // Process stack arguments, and make sure the stack is suitably aligned.
138 uintptr_t original_stack = sp();
139 uintptr_t entry_stack = original_stack -
140 stack_args.size() * sizeof(stack_args[0]);
141 if (base::OS::ActivationFrameAlignment() != 0) {
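// ANDing with the negated alignment rounds entry_stack down to a multiple of
// the alignment; e.g. an alignment of 16 clears the low four bits.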
142 entry_stack &= -base::OS::ActivationFrameAlignment();
143 }
144 char * stack = reinterpret_cast<char*>(entry_stack);
145 std::vector<int64_t>::const_iterator it;
146 for (it = stack_args.begin(); it != stack_args.end(); it++) {
147 memcpy(stack, &(*it), sizeof(*it));
148 stack += sizeof(*it);
149 }
150
151 DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack);
152 set_sp(entry_stack);
153
154 // Call the generated code.
155 set_pc(entry);
156 set_lr(kEndOfSimAddress);
157 CheckPCSComplianceAndRun();
158
159 set_sp(original_stack);
160 }
161
162 void Simulator::CheckPCSComplianceAndRun() {
163 // Adjust JS-based stack limit to C-based stack limit.
164 isolate_->stack_guard()->AdjustStackLimitForSimulator();
165
166 #ifdef DEBUG
167 DCHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
168 DCHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count());
169
170 int64_t saved_registers[kNumberOfCalleeSavedRegisters];
171 uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters];
172
173 CPURegList register_list = kCalleeSaved;
174 CPURegList fpregister_list = kCalleeSavedV;
175
176 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
177 // x31 is not a callee-saved register, so it never appears in this list and
178 // we don't need to specify whether we want the stack pointer or xzr.
179 saved_registers[i] = xreg(register_list.PopLowestIndex().code());
180 }
181 for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
182 saved_fpregisters[i] =
183 dreg_bits(fpregister_list.PopLowestIndex().code());
184 }
185 int64_t original_stack = sp();
186 #endif
187 // Start the simulation!
188 Run();
189 #ifdef DEBUG
190 DCHECK_EQ(original_stack, sp());
191 // Check that callee-saved registers have been preserved.
192 register_list = kCalleeSaved;
193 fpregister_list = kCalleeSavedV;
194 for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
195 DCHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
196 }
197 for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
198 DCHECK(saved_fpregisters[i] ==
199 dreg_bits(fpregister_list.PopLowestIndex().code()));
200 }
201
202 // Corrupt the caller-saved registers, except for the return registers.
203
204 // In theory x0 to x7 can be used for return values, but V8 only uses x0 and
205 // x1 for now.
206 register_list = kCallerSaved;
207 register_list.Remove(x0);
208 register_list.Remove(x1);
209
210 // In theory d0 to d7 can be used for return values, but V8 only uses d0
211 // for now.
212 fpregister_list = kCallerSavedV;
213 fpregister_list.Remove(d0);
214
215 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
216 CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue);
217 #endif
218 }
219
220
221 #ifdef DEBUG
222 // The least significant byte of the corruption value holds the corresponding
223 // register's code.
224 void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
225 if (list->type() == CPURegister::kRegister) {
226 while (!list->IsEmpty()) {
227 unsigned code = list->PopLowestIndex().code();
228 set_xreg(code, value | code);
229 }
230 } else {
231 DCHECK_EQ(list->type(), CPURegister::kVRegister);
232 while (!list->IsEmpty()) {
233 unsigned code = list->PopLowestIndex().code();
234 set_dreg_bits(code, value | code);
235 }
236 }
237 }
238
239
240 void Simulator::CorruptAllCallerSavedCPURegisters() {
241 // CorruptRegisters alters its parameter, so copy the lists first.
242 CPURegList register_list = kCallerSaved;
243 CPURegList fpregister_list = kCallerSavedV;
244
245 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue);
246 CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue);
247 }
248 #endif
249
250
251 // Extending the stack by 2 * 64 bits is required for stack alignment purposes.
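// The address goes in the lower slot and the upper slot is filled with the
// zap value, so PopAddress() can read the address back from the new sp.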
252 uintptr_t Simulator::PushAddress(uintptr_t address) {
253 DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
254 intptr_t new_sp = sp() - 2 * kXRegSize;
255 uintptr_t* alignment_slot =
256 reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
257 memcpy(alignment_slot, &kSlotsZapValue, kPointerSize);
258 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
259 memcpy(stack_slot, &address, kPointerSize);
260 set_sp(new_sp);
261 return new_sp;
262 }
263
264
265 uintptr_t Simulator::PopAddress() {
266 intptr_t current_sp = sp();
267 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
268 uintptr_t address = *stack_slot;
269 DCHECK_LT(sizeof(uintptr_t), 2 * kXRegSize);
270 set_sp(current_sp + 2 * kXRegSize);
271 return address;
272 }
273
274
275 // Returns the limit of the stack area to enable checking for stack overflows.
276 uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
277 // The simulator uses a separate JS stack. If we have exhausted the C stack,
278 // we also drop down the JS limit to reflect the exhaustion on the JS stack.
279 if (GetCurrentStackPosition() < c_limit) {
280 return reinterpret_cast<uintptr_t>(get_sp());
281 }
282
283 // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
284 // to prevent overrunning the stack when pushing values.
285 return stack_limit_ + 1024;
286 }
287
288 void Simulator::SetRedirectInstruction(Instruction* instruction) {
289 instruction->SetInstructionBits(
290 HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
291 }
292
293 Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
294 Isolate* isolate, FILE* stream)
295 : decoder_(decoder),
296 last_debugger_input_(nullptr),
297 log_parameters_(NO_PARAM),
298 isolate_(isolate) {
299 // Setup the decoder.
300 decoder_->AppendVisitor(this);
301
302 Init(stream);
303
304 if (FLAG_trace_sim) {
305 decoder_->InsertVisitorBefore(print_disasm_, this);
306 log_parameters_ = LOG_ALL;
307 }
308
309 if (FLAG_log_instruction_stats) {
310 instrument_ = new Instrument(FLAG_log_instruction_file,
311 FLAG_log_instruction_period);
312 decoder_->AppendVisitor(instrument_);
313 }
314 }
315
316 Simulator::Simulator()
317 : decoder_(nullptr),
318 last_debugger_input_(nullptr),
319 log_parameters_(NO_PARAM),
320 isolate_(nullptr) {
321 Init(stdout);
322 CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
323 }
324
325
326 void Simulator::Init(FILE* stream) {
327 ResetState();
328
329 // Allocate and setup the simulator stack.
330 stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
331 stack_ = reinterpret_cast<uintptr_t>(new byte[stack_size_]);
332 stack_limit_ = stack_ + stack_protection_size_;
333 uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
334 // The stack pointer must be 16-byte aligned.
335 set_sp(tos & ~0xFUL);
336
337 stream_ = stream;
338 print_disasm_ = new PrintDisassembler(stream_);
339
340 // The debugger needs to disassemble code without the simulator executing an
341 // instruction, so we create a dedicated decoder.
342 disassembler_decoder_ = new Decoder<DispatchingDecoderVisitor>();
343 disassembler_decoder_->AppendVisitor(print_disasm_);
344 }
345
346
347 void Simulator::ResetState() {
348 // Reset the system registers.
349 nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
350 fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
351
352 // Reset the PC and fill the registers with recognizable junk values.
353 pc_ = nullptr;
354 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
355 set_xreg(i, 0xBADBEEF);
356 }
357 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
358 // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
359 set_dreg_bits(i, 0x7FF000007F800001UL);
360 }
361 // Returning to address 0 exits the Simulator.
362 set_lr(kEndOfSimAddress);
363
364 // Reset debug helpers.
365 breakpoints_.clear();
366 break_on_next_ = false;
367 }
368
369
370 Simulator::~Simulator() {
371 global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
372 delete[] reinterpret_cast<byte*>(stack_);
373 if (FLAG_log_instruction_stats) {
374 delete instrument_;
375 }
376 delete disassembler_decoder_;
377 delete print_disasm_;
378 DeleteArray(last_debugger_input_);
379 delete decoder_;
380 }
381
382
383 void Simulator::Run() {
384 // Flush any written registers before executing anything, so that
385 // manually-set registers are logged _before_ the first instruction.
386 LogAllWrittenRegisters();
387
388 pc_modified_ = false;
389 while (pc_ != kEndOfSimAddress) {
390 ExecuteInstruction();
391 }
392 }
393
394
395 void Simulator::RunFrom(Instruction* start) {
396 set_pc(start);
397 Run();
398 }
399
400
401 // Calls into the V8 runtime are based on this very simple interface.
402 // Note: To be able to return two values from some calls the code in runtime.cc
403 // uses the ObjectPair structure.
404 // The simulator assumes all runtime calls return two 64-bit values. If they
405 // don't, register x1 is clobbered. This is fine because x1 is caller-saved.
406 typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
407 int64_t arg2, int64_t arg3,
408 int64_t arg4, int64_t arg5,
409 int64_t arg6, int64_t arg7,
410 int64_t arg8);
411
412 typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
413 typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
414 typedef double (*SimulatorRuntimeFPCall)(double arg1);
415 typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
416
417 // This signature supports a direct call into an API function's native callback
418 // (refer to InvocationCallback in v8.h).
419 typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
420 typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
421
422 // This signature supports direct call to accessor getter callback.
423 typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
424 typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
425 void* arg2);
426
427 void Simulator::DoRuntimeCall(Instruction* instr) {
428 Redirection* redirection = Redirection::FromInstruction(instr);
429
430 // The called C code might itself call simulated code, so any
431 // caller-saved registers (including lr) could still be clobbered by a
432 // redirected call.
433 Instruction* return_address = lr();
434
435 int64_t external =
436 reinterpret_cast<int64_t>(redirection->external_function());
437
438 TraceSim("Call to host function at %p\n", redirection->external_function());
439
440 // SP must be 16-byte-aligned at the call interface.
441 bool stack_alignment_exception = ((sp() & 0xF) != 0);
442 if (stack_alignment_exception) {
443 TraceSim(" with unaligned stack 0x%016" PRIx64 ".\n", sp());
444 FATAL("ALIGNMENT EXCEPTION");
445 }
446
447 int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
448
449 const int64_t arg0 = xreg(0);
450 const int64_t arg1 = xreg(1);
451 const int64_t arg2 = xreg(2);
452 const int64_t arg3 = xreg(3);
453 const int64_t arg4 = xreg(4);
454 const int64_t arg5 = xreg(5);
455 const int64_t arg6 = xreg(6);
456 const int64_t arg7 = xreg(7);
457 const int64_t arg8 = stack_pointer[0];
458 STATIC_ASSERT(kMaxCParameters == 9);
459
460 switch (redirection->type()) {
461 default:
462 TraceSim("Type: Unknown.\n");
463 UNREACHABLE();
464 break;
465
466 case ExternalReference::BUILTIN_CALL:
467 case ExternalReference::BUILTIN_CALL_PAIR: {
468 // Object* f(v8::internal::Arguments) or
469 // ObjectPair f(v8::internal::Arguments).
470 TraceSim("Type: BUILTIN_CALL\n");
471 SimulatorRuntimeCall target =
472 reinterpret_cast<SimulatorRuntimeCall>(external);
473
474 // We don't know how many arguments are being passed, but we can
475 // pass 8 without touching the stack. They will be ignored by the
476 // host function if they aren't used.
477 TraceSim(
478 "Arguments: "
479 "0x%016" PRIx64 ", 0x%016" PRIx64
480 ", "
481 "0x%016" PRIx64 ", 0x%016" PRIx64
482 ", "
483 "0x%016" PRIx64 ", 0x%016" PRIx64
484 ", "
485 "0x%016" PRIx64 ", 0x%016" PRIx64
486 ", "
487 "0x%016" PRIx64,
488 arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
489 ObjectPair result =
490 target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
491 TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
492 static_cast<void*>(result.y));
493 #ifdef DEBUG
494 CorruptAllCallerSavedCPURegisters();
495 #endif
496 set_xreg(0, reinterpret_cast<int64_t>(result.x));
497 set_xreg(1, reinterpret_cast<int64_t>(result.y));
498 break;
499 }
500
501 case ExternalReference::DIRECT_API_CALL: {
502 // void f(v8::FunctionCallbackInfo&)
503 TraceSim("Type: DIRECT_API_CALL\n");
504 SimulatorRuntimeDirectApiCall target =
505 reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
506 TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
507 target(xreg(0));
508 TraceSim("No return value.\n");
509 #ifdef DEBUG
510 CorruptAllCallerSavedCPURegisters();
511 #endif
512 break;
513 }
514
515 case ExternalReference::BUILTIN_COMPARE_CALL: {
516 // int f(double, double)
517 TraceSim("Type: BUILTIN_COMPARE_CALL\n");
518 SimulatorRuntimeCompareCall target =
519 reinterpret_cast<SimulatorRuntimeCompareCall>(external);
520 TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
521 int64_t result = target(dreg(0), dreg(1));
522 TraceSim("Returned: %" PRId64 "\n", result);
523 #ifdef DEBUG
524 CorruptAllCallerSavedCPURegisters();
525 #endif
526 set_xreg(0, result);
527 break;
528 }
529
530 case ExternalReference::BUILTIN_FP_CALL: {
531 // double f(double)
532 TraceSim("Type: BUILTIN_FP_CALL\n");
533 SimulatorRuntimeFPCall target =
534 reinterpret_cast<SimulatorRuntimeFPCall>(external);
535 TraceSim("Argument: %f\n", dreg(0));
536 double result = target(dreg(0));
537 TraceSim("Returned: %f\n", result);
538 #ifdef DEBUG
539 CorruptAllCallerSavedCPURegisters();
540 #endif
541 set_dreg(0, result);
542 break;
543 }
544
545 case ExternalReference::BUILTIN_FP_FP_CALL: {
546 // double f(double, double)
547 TraceSim("Type: BUILTIN_FP_FP_CALL\n");
548 SimulatorRuntimeFPFPCall target =
549 reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
550 TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
551 double result = target(dreg(0), dreg(1));
552 TraceSim("Returned: %f\n", result);
553 #ifdef DEBUG
554 CorruptAllCallerSavedCPURegisters();
555 #endif
556 set_dreg(0, result);
557 break;
558 }
559
560 case ExternalReference::BUILTIN_FP_INT_CALL: {
561 // double f(double, int)
562 TraceSim("Type: BUILTIN_FP_INT_CALL\n");
563 SimulatorRuntimeFPIntCall target =
564 reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
565 TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
566 double result = target(dreg(0), wreg(0));
567 TraceSim("Returned: %f\n", result);
568 #ifdef DEBUG
569 CorruptAllCallerSavedCPURegisters();
570 #endif
571 set_dreg(0, result);
572 break;
573 }
574
575 case ExternalReference::DIRECT_GETTER_CALL: {
576 // void f(Local<String> property, PropertyCallbackInfo& info)
577 TraceSim("Type: DIRECT_GETTER_CALL\n");
578 SimulatorRuntimeDirectGetterCall target =
579 reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
580 TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
581 xreg(0), xreg(1));
582 target(xreg(0), xreg(1));
583 TraceSim("No return value.\n");
584 #ifdef DEBUG
585 CorruptAllCallerSavedCPURegisters();
586 #endif
587 break;
588 }
589
590 case ExternalReference::PROFILING_API_CALL: {
591 // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
592 TraceSim("Type: PROFILING_API_CALL\n");
593 SimulatorRuntimeProfilingApiCall target =
594 reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
595 void* arg1 = Redirection::ReverseRedirection(xreg(1));
596 TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
597 target(xreg(0), arg1);
598 TraceSim("No return value.\n");
599 #ifdef DEBUG
600 CorruptAllCallerSavedCPURegisters();
601 #endif
602 break;
603 }
604
605 case ExternalReference::PROFILING_GETTER_CALL: {
606 // void f(Local<String> property, PropertyCallbackInfo& info,
607 // AccessorNameGetterCallback callback)
608 TraceSim("Type: PROFILING_GETTER_CALL\n");
609 SimulatorRuntimeProfilingGetterCall target =
610 reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
611 external);
612 void* arg2 = Redirection::ReverseRedirection(xreg(2));
613 TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
614 xreg(0), xreg(1), arg2);
615 target(xreg(0), xreg(1), arg2);
616 TraceSim("No return value.\n");
617 #ifdef DEBUG
618 CorruptAllCallerSavedCPURegisters();
619 #endif
620 break;
621 }
622 }
623
624 set_lr(return_address);
625 set_pc(return_address);
626 }
627
628 const char* Simulator::xreg_names[] = {
629 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
630 "x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
631 "x22", "x23", "x24", "x25", "x26", "cp", "x28", "fp", "lr", "xzr", "sp"};
632
633 const char* Simulator::wreg_names[] = {
634 "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
635 "w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
636 "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
637 "wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
638
639 const char* Simulator::sreg_names[] = {
640 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
641 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
642 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
643 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
644
645 const char* Simulator::dreg_names[] = {
646 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
647 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
648 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
649 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
650
651 const char* Simulator::vreg_names[] = {
652 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
653 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
654 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
655 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
656
657
658 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
659 static_assert(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1),
660 "Array must be large enough to hold all register names.");
661 DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
662 // The modulo operator has no effect here, but it silences a broken GCC
663 // warning about out-of-bounds array accesses.
664 code %= kNumberOfRegisters;
665
666 // If the code represents the stack pointer, index the name after zr.
667 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
668 code = kZeroRegCode + 1;
669 }
670 return wreg_names[code];
671 }
672
673
674 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
675 static_assert(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1),
676 "Array must be large enough to hold all register names.");
677 DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
678 code %= kNumberOfRegisters;
679
680 // If the code represents the stack pointer, index the name after zr.
681 if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
682 code = kZeroRegCode + 1;
683 }
684 return xreg_names[code];
685 }
686
687
688 const char* Simulator::SRegNameForCode(unsigned code) {
689 static_assert(arraysize(Simulator::sreg_names) == kNumberOfVRegisters,
690 "Array must be large enough to hold all register names.");
691 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
692 return sreg_names[code % kNumberOfVRegisters];
693 }
694
695
696 const char* Simulator::DRegNameForCode(unsigned code) {
697 static_assert(arraysize(Simulator::dreg_names) == kNumberOfVRegisters,
698 "Array must be large enough to hold all register names.");
699 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
700 return dreg_names[code % kNumberOfVRegisters];
701 }
702
703
704 const char* Simulator::VRegNameForCode(unsigned code) {
705 static_assert(arraysize(Simulator::vreg_names) == kNumberOfVRegisters,
706 "Array must be large enough to hold all register names.");
707 DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
708 return vreg_names[code % kNumberOfVRegisters];
709 }
710
711 void LogicVRegister::ReadUintFromMem(VectorFormat vform, int index,
712 uint64_t addr) const {
713 switch (LaneSizeInBitsFromFormat(vform)) {
714 case 8:
715 register_.Insert(index, SimMemory::Read<uint8_t>(addr));
716 break;
717 case 16:
718 register_.Insert(index, SimMemory::Read<uint16_t>(addr));
719 break;
720 case 32:
721 register_.Insert(index, SimMemory::Read<uint32_t>(addr));
722 break;
723 case 64:
724 register_.Insert(index, SimMemory::Read<uint64_t>(addr));
725 break;
726 default:
727 UNREACHABLE();
728 return;
729 }
730 }
731
732 void LogicVRegister::WriteUintToMem(VectorFormat vform, int index,
733 uint64_t addr) const {
734 switch (LaneSizeInBitsFromFormat(vform)) {
735 case 8:
736 SimMemory::Write<uint8_t>(addr, static_cast<uint8_t>(Uint(vform, index)));
737 break;
738 case 16:
739 SimMemory::Write<uint16_t>(addr,
740 static_cast<uint16_t>(Uint(vform, index)));
741 break;
742 case 32:
743 SimMemory::Write<uint32_t>(addr,
744 static_cast<uint32_t>(Uint(vform, index)));
745 break;
746 case 64:
747 SimMemory::Write<uint64_t>(addr, Uint(vform, index));
748 break;
749 default:
750 UNREACHABLE();
751 return;
752 }
753 }
754
755
756 int Simulator::CodeFromName(const char* name) {
757 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
758 if ((strcmp(xreg_names[i], name) == 0) ||
759 (strcmp(wreg_names[i], name) == 0)) {
760 return i;
761 }
762 }
763 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
764 if ((strcmp(vreg_names[i], name) == 0) ||
765 (strcmp(dreg_names[i], name) == 0) ||
766 (strcmp(sreg_names[i], name) == 0)) {
767 return i;
768 }
769 }
770 if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
771 return kSPRegInternalCode;
772 }
773 return -1;
774 }
775
776
777 // Helpers ---------------------------------------------------------------------
778 template <typename T>
779 T Simulator::AddWithCarry(bool set_flags, T left, T right, int carry_in) {
780 // Use unsigned types to avoid implementation-defined overflow behaviour.
781 static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
782 static_assert((sizeof(T) == kWRegSize) || (sizeof(T) == kXRegSize),
783 "Only W- or X-sized operands are tested");
784
785 DCHECK((carry_in == 0) || (carry_in == 1));
786 T result = left + right + carry_in;
787
788 if (set_flags) {
789 nzcv().SetN(CalcNFlag(result));
790 nzcv().SetZ(CalcZFlag(result));
791
792 // Compute the C flag by comparing the result to the max unsigned integer.
793 T max_uint_2op = std::numeric_limits<T>::max() - carry_in;
794 nzcv().SetC((left > max_uint_2op) || ((max_uint_2op - left) < right));
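// For example, with 32-bit operands 0xFFFFFFFF + 0x00000001 wraps to 0,
// so both Z and C are set.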
795
796 // Overflow iff the sign bit is the same for the two inputs and different
797 // for the result.
798 T sign_mask = T(1) << (sizeof(T) * 8 - 1);
799 T left_sign = left & sign_mask;
800 T right_sign = right & sign_mask;
801 T result_sign = result & sign_mask;
802 nzcv().SetV((left_sign == right_sign) && (left_sign != result_sign));
803
804 LogSystemRegister(NZCV);
805 }
806 return result;
807 }
808
809
810 template<typename T>
811 void Simulator::AddSubWithCarry(Instruction* instr) {
812 // Use unsigned types to avoid implementation-defined overflow behaviour.
813 static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
814
815 T op2 = reg<T>(instr->Rm());
816 T new_val;
817
818 if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
819 op2 = ~op2;
820 }
821
822 new_val = AddWithCarry<T>(instr->FlagsUpdate(),
823 reg<T>(instr->Rn()),
824 op2,
825 nzcv().C());
826
827 set_reg<T>(instr->Rd(), new_val);
828 }
829
830 template <typename T>
831 T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
832 typedef typename std::make_unsigned<T>::type unsignedT;
833
834 if (amount == 0) {
835 return value;
836 }
837
838 switch (shift_type) {
839 case LSL:
840 return value << amount;
841 case LSR:
842 return static_cast<unsignedT>(value) >> amount;
843 case ASR:
844 return value >> amount;
845 case ROR: {
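// For example, a 32-bit ROR of 0x0000000F by 4 yields 0xF0000000.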
846 unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
847 return (static_cast<unsignedT>(value) >> amount) |
848 ((value & mask) << (sizeof(mask) * 8 - amount));
849 }
850 default:
851 UNIMPLEMENTED();
852 return 0;
853 }
854 }
855
856
857 template <typename T>
858 T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
859 const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
860 const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
861 const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
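// For a 64-bit T these shifts are 56, 48 and 32; the SXT* cases below shift
// the value left so its sign bit reaches the top bit, then shift back to
// sign-extend.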
862
863 switch (extend_type) {
864 case UXTB:
865 value &= kByteMask;
866 break;
867 case UXTH:
868 value &= kHalfWordMask;
869 break;
870 case UXTW:
871 value &= kWordMask;
872 break;
873 case SXTB:
874 value = (value << kSignExtendBShift) >> kSignExtendBShift;
875 break;
876 case SXTH:
877 value = (value << kSignExtendHShift) >> kSignExtendHShift;
878 break;
879 case SXTW:
880 value = (value << kSignExtendWShift) >> kSignExtendWShift;
881 break;
882 case UXTX:
883 case SXTX:
884 break;
885 default:
886 UNREACHABLE();
887 }
888 return value << left_shift;
889 }
890
891
892 template <typename T>
893 void Simulator::Extract(Instruction* instr) {
894 unsigned lsb = instr->ImmS();
895 T op2 = reg<T>(instr->Rm());
896 T result = op2;
897
898 if (lsb) {
899 T op1 = reg<T>(instr->Rn());
900 result = op2 >> lsb | (op1 << ((sizeof(T) * 8) - lsb));
901 }
902 set_reg<T>(instr->Rd(), result);
903 }
904
905
906 void Simulator::FPCompare(double val0, double val1) {
907 AssertSupportedFPCR();
908
909 // TODO(jbramley): This assumes that the C++ implementation handles
910 // comparisons in the way that we expect (as per AssertSupportedFPCR()).
911 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
912 nzcv().SetRawValue(FPUnorderedFlag);
913 } else if (val0 < val1) {
914 nzcv().SetRawValue(FPLessThanFlag);
915 } else if (val0 > val1) {
916 nzcv().SetRawValue(FPGreaterThanFlag);
917 } else if (val0 == val1) {
918 nzcv().SetRawValue(FPEqualFlag);
919 } else {
920 UNREACHABLE();
921 }
922 LogSystemRegister(NZCV);
923 }
924
925 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
926 size_t reg_size, size_t lane_size) {
927 DCHECK_GE(reg_size, lane_size);
928
929 uint32_t format = 0;
930 if (reg_size != lane_size) {
931 switch (reg_size) {
932 default:
933 UNREACHABLE();
934 case kQRegSize:
935 format = kPrintRegAsQVector;
936 break;
937 case kDRegSize:
938 format = kPrintRegAsDVector;
939 break;
940 }
941 }
942
943 switch (lane_size) {
944 default:
945 UNREACHABLE();
946 case kQRegSize:
947 format |= kPrintReg1Q;
948 break;
949 case kDRegSize:
950 format |= kPrintReg1D;
951 break;
952 case kSRegSize:
953 format |= kPrintReg1S;
954 break;
955 case kHRegSize:
956 format |= kPrintReg1H;
957 break;
958 case kBRegSize:
959 format |= kPrintReg1B;
960 break;
961 }
962
963 // These sizes would be duplicate case labels.
964 static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
965 static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
966 static_assert(kPrintXReg == kPrintReg1D,
967 "X and D register printing code is shared.");
968 static_assert(kPrintWReg == kPrintReg1S,
969 "W and S register printing code is shared.");
970
971 return static_cast<PrintRegisterFormat>(format);
972 }
973
974 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
975 VectorFormat vform) {
976 switch (vform) {
977 default:
978 UNREACHABLE();
979 case kFormat16B:
980 return kPrintReg16B;
981 case kFormat8B:
982 return kPrintReg8B;
983 case kFormat8H:
984 return kPrintReg8H;
985 case kFormat4H:
986 return kPrintReg4H;
987 case kFormat4S:
988 return kPrintReg4S;
989 case kFormat2S:
990 return kPrintReg2S;
991 case kFormat2D:
992 return kPrintReg2D;
993 case kFormat1D:
994 return kPrintReg1D;
995
996 case kFormatB:
997 return kPrintReg1B;
998 case kFormatH:
999 return kPrintReg1H;
1000 case kFormatS:
1001 return kPrintReg1S;
1002 case kFormatD:
1003 return kPrintReg1D;
1004 }
1005 }
1006
1007 Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
1008 VectorFormat vform) {
1009 switch (vform) {
1010 default:
1011 UNREACHABLE();
1012 case kFormat4S:
1013 return kPrintReg4SFP;
1014 case kFormat2S:
1015 return kPrintReg2SFP;
1016 case kFormat2D:
1017 return kPrintReg2DFP;
1018 case kFormat1D:
1019 return kPrintReg1DFP;
1020
1021 case kFormatS:
1022 return kPrintReg1SFP;
1023 case kFormatD:
1024 return kPrintReg1DFP;
1025 }
1026 }
1027
1028 void Simulator::SetBreakpoint(Instruction* location) {
1029 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1030 if (breakpoints_.at(i).location == location) {
1031 PrintF(stream_,
1032 "Existing breakpoint at %p was %s\n",
1033 reinterpret_cast<void*>(location),
1034 breakpoints_.at(i).enabled ? "disabled" : "enabled");
1035 breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
1036 return;
1037 }
1038 }
1039 Breakpoint new_breakpoint = {location, true};
1040 breakpoints_.push_back(new_breakpoint);
1041 PrintF(stream_,
1042 "Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
1043 }
1044
1045
1046 void Simulator::ListBreakpoints() {
1047 PrintF(stream_, "Breakpoints:\n");
1048 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1049 PrintF(stream_, "%p : %s\n",
1050 reinterpret_cast<void*>(breakpoints_.at(i).location),
1051 breakpoints_.at(i).enabled ? "enabled" : "disabled");
1052 }
1053 }
1054
1055
1056 void Simulator::CheckBreakpoints() {
1057 bool hit_a_breakpoint = false;
1058 for (unsigned i = 0; i < breakpoints_.size(); i++) {
1059 if ((breakpoints_.at(i).location == pc_) &&
1060 breakpoints_.at(i).enabled) {
1061 hit_a_breakpoint = true;
1062 // Disable this breakpoint.
1063 breakpoints_.at(i).enabled = false;
1064 }
1065 }
1066 if (hit_a_breakpoint) {
1067 PrintF(stream_, "Hit and disabled a breakpoint at %p.\n",
1068 reinterpret_cast<void*>(pc_));
1069 Debug();
1070 }
1071 }
1072
1073
1074 void Simulator::CheckBreakNext() {
1075 // If the current instruction is a BLR, insert a breakpoint just after it.
1076 if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
1077 SetBreakpoint(pc_->following());
1078 break_on_next_ = false;
1079 }
1080 }
1081
1082
1083 void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
1084 Instruction* end = start->InstructionAtOffset(count * kInstrSize);
1085 for (Instruction* pc = start; pc < end; pc = pc->following()) {
1086 disassembler_decoder_->Decode(pc);
1087 }
1088 }
1089
1090 void Simulator::PrintWrittenRegisters() {
1091 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1092 if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
1093 }
1094 }
1095
1096 void Simulator::PrintWrittenVRegisters() {
1097 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1098 // At this point there is no type information, so print as a raw 1Q.
1099 if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
1100 }
1101 }
1102
1103 void Simulator::PrintSystemRegisters() {
1104 PrintSystemRegister(NZCV);
1105 PrintSystemRegister(FPCR);
1106 }
1107
1108
1109 void Simulator::PrintRegisters() {
1110 for (unsigned i = 0; i < kNumberOfRegisters; i++) {
1111 PrintRegister(i);
1112 }
1113 }
1114
1115 void Simulator::PrintVRegisters() {
1116 for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
1117 // At this point there is no type information, so print as a raw 1Q.
1118 PrintVRegister(i, kPrintReg1Q);
1119 }
1120 }
1121
1122
1123 void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
1124 registers_[code].NotifyRegisterLogged();
1125
1126 // Don't print writes into xzr.
1127 if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
1128 return;
1129 }
1130
1131 // The template for all x and w registers:
1132 // "# x{code}: 0x{value}"
1133 // "# w{code}: 0x{value}"
1134
1135 PrintRegisterRawHelper(code, r31mode);
1136 fprintf(stream_, "\n");
1137 }
1138
1139 // Print a register's name and raw value.
1140 //
1141 // The `bytes` and `lsb` arguments can be used to limit the bytes that are
1142 // printed. These arguments are intended for use in cases where the register
1143 // actually been updated (such as in PrintVWrite).
1144 //
1145 // No newline is printed. This allows the caller to print more details (such as
1146 // a floating-point interpretation or a memory access annotation).
1147 void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
1148 // The template for vector types:
1149 // "# v{code}: 0xFFEEDDCCBBAA99887766554433221100".
1150 // An example with bytes=4 and lsb=8:
1151 // "# v{code}: 0xBBAA9988 ".
1152 fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
1153 clr_vreg_value);
1154
1155 int msb = lsb + bytes - 1;
1156 int byte = kQRegSize - 1;
1157
1158 // Print leading padding spaces. (Two spaces per byte.)
1159 while (byte > msb) {
1160 fprintf(stream_, " ");
1161 byte--;
1162 }
1163
1164 // Print the specified part of the value, byte by byte.
1165 qreg_t rawbits = qreg(code);
1166 fprintf(stream_, "0x");
1167 while (byte >= lsb) {
1168 fprintf(stream_, "%02x", rawbits.val[byte]);
1169 byte--;
1170 }
1171
1172 // Print trailing padding spaces.
1173 while (byte >= 0) {
1174 fprintf(stream_, " ");
1175 byte--;
1176 }
1177 fprintf(stream_, "%s", clr_normal);
1178 }
1179
1180 // Print each of the specified lanes of a register as a float or double value.
1181 //
1182 // The `lane_count` and `lslane` arguments can be used to limit the lanes that
1183 // are printed. These arguments are intended for use in cases where the
1184 // hasn't actually been updated (such as in PrintVWrite).
1185 //
1186 // No newline is printed. This allows the caller to print more details (such as
1187 // a memory access annotation).
1188 void Simulator::PrintVRegisterFPHelper(unsigned code,
1189 unsigned lane_size_in_bytes,
1190 int lane_count, int rightmost_lane) {
1191 DCHECK((lane_size_in_bytes == kSRegSize) ||
1192 (lane_size_in_bytes == kDRegSize));
1193
1194 unsigned msb = (lane_count + rightmost_lane) * lane_size_in_bytes;
1195 DCHECK_LE(msb, static_cast<unsigned>(kQRegSize));
1196
1197 // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
1198 // name is used:
1199 // " (s{code}: {value})"
1200 // " (d{code}: {value})"
1201 // For vector types, "..." is used to represent one or more omitted lanes.
1202 // " (..., {value}, {value}, ...)"
1203 if ((lane_count == 1) && (rightmost_lane == 0)) {
1204 const char* name = (lane_size_in_bytes == kSRegSize)
1205 ? SRegNameForCode(code)
1206 : DRegNameForCode(code);
1207 fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
1208 } else {
1209 if (msb < (kQRegSize - 1)) {
1210 fprintf(stream_, " (..., ");
1211 } else {
1212 fprintf(stream_, " (");
1213 }
1214 }
1215
1216 // Print the list of values.
1217 const char* separator = "";
1218 int leftmost_lane = rightmost_lane + lane_count - 1;
1219 for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
1220 double value = (lane_size_in_bytes == kSRegSize)
1221 ? vreg(code).Get<float>(lane)
1222 : vreg(code).Get<double>(lane);
1223 fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
1224 separator = ", ";
1225 }
1226
1227 if (rightmost_lane > 0) {
1228 fprintf(stream_, ", ...");
1229 }
1230 fprintf(stream_, ")");
1231 }
1232
1233 // Print a register's name and raw value.
1234 //
1235 // Only the least-significant `size_in_bytes` bytes of the register are printed,
1236 // but the value is aligned as if the whole register had been printed.
1237 //
1238 // For typical register updates, size_in_bytes should be set to kXRegSize
1239 // -- the default -- so that the whole register is printed. Other values of
1240 // size_in_bytes are intended for use when the register hasn't actually been
1241 // updated (such as in PrintWrite).
1242 //
1243 // No newline is printed. This allows the caller to print more details (such as
1244 // a memory access annotation).
1245 void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
1246 int size_in_bytes) {
1247 // The template for all supported sizes.
1248 // "# x{code}: 0xFFEEDDCCBBAA9988"
1249 // "# w{code}: 0xBBAA9988"
1250 // "# w{code}<15:0>: 0x9988"
1251 // "# w{code}<7:0>: 0x88"
1252 unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;
1253
1254 const char* name = "";
1255 const char* suffix = "";
1256 switch (size_in_bytes) {
1257 case kXRegSize:
1258 name = XRegNameForCode(code, r31mode);
1259 break;
1260 case kWRegSize:
1261 name = WRegNameForCode(code, r31mode);
1262 break;
1263 case 2:
1264 name = WRegNameForCode(code, r31mode);
1265 suffix = "<15:0>";
1266 padding_chars -= strlen(suffix);
1267 break;
1268 case 1:
1269 name = WRegNameForCode(code, r31mode);
1270 suffix = "<7:0>";
1271 padding_chars -= strlen(suffix);
1272 break;
1273 default:
1274 UNREACHABLE();
1275 }
1276 fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);
1277
1278 // Print leading padding spaces.
1279 DCHECK_LT(padding_chars, kXRegSize * 2U);
1280 for (unsigned i = 0; i < padding_chars; i++) {
1281 putc(' ', stream_);
1282 }
1283
1284 // Print the specified bits in hexadecimal format.
1285 uint64_t bits = reg<uint64_t>(code, r31mode);
1286 bits &= kXRegMask >> ((kXRegSize - size_in_bytes) * 8);
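// For example, size_in_bytes == kWRegSize masks the value down to its low
// 32 bits before printing.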
1287 static_assert(sizeof(bits) == kXRegSize,
1288 "X registers and uint64_t must be the same size.");
1289
1290 int chars = size_in_bytes * 2;
1291 fprintf(stream_, "%s0x%0*" PRIx64 "%s", clr_reg_value, chars, bits,
1292 clr_normal);
1293 }
1294
1295 void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
1296 vregisters_[code].NotifyRegisterLogged();
1297
1298 int lane_size_log2 = format & kPrintRegLaneSizeMask;
1299
1300 int reg_size_log2;
1301 if (format & kPrintRegAsQVector) {
1302 reg_size_log2 = kQRegSizeLog2;
1303 } else if (format & kPrintRegAsDVector) {
1304 reg_size_log2 = kDRegSizeLog2;
1305 } else {
1306 // Scalar types.
1307 reg_size_log2 = lane_size_log2;
1308 }
1309
1310 int lane_count = 1 << (reg_size_log2 - lane_size_log2);
1311 int lane_size = 1 << lane_size_log2;
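// For example, a Q-sized vector with 32-bit lanes (4S) gives lane_count == 4
// and lane_size == 4 (bytes).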
1312
1313 // The template for vector types:
1314 // "# v{code}: 0x{rawbits} (..., {value}, ...)".
1315 // The template for scalar types:
1316 // "# v{code}: 0x{rawbits} ({reg}:{value})".
1317 // The values in parentheses after the bit representations are floating-point
1318 // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.
1319
1320 PrintVRegisterRawHelper(code);
1321 if (format & kPrintRegAsFP) {
1322 PrintVRegisterFPHelper(code, lane_size, lane_count);
1323 }
1324
1325 fprintf(stream_, "\n");
1326 }
1327
1328
1329 void Simulator::PrintSystemRegister(SystemRegister id) {
1330 switch (id) {
1331 case NZCV:
1332 fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
1333 clr_flag_name, clr_flag_value,
1334 nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
1335 clr_normal);
1336 break;
1337 case FPCR: {
1338 static const char * rmode[] = {
1339 "0b00 (Round to Nearest)",
1340 "0b01 (Round towards Plus Infinity)",
1341 "0b10 (Round towards Minus Infinity)",
1342 "0b11 (Round towards Zero)"
1343 };
1344 DCHECK(fpcr().RMode() < arraysize(rmode));
1345 fprintf(stream_,
1346 "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
1347 clr_flag_name, clr_flag_value,
1348 fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
1349 clr_normal);
1350 break;
1351 }
1352 default:
1353 UNREACHABLE();
1354 }
1355 }
1356
1357 void Simulator::PrintRead(uintptr_t address, unsigned reg_code,
1358 PrintRegisterFormat format) {
1359 registers_[reg_code].NotifyRegisterLogged();
1360
1361 USE(format);
1362
1363 // The template is "# {reg}: 0x{value} <- {address}".
1364 PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
1365 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
1366 clr_memory_address, address, clr_normal);
1367 }
1368
1369 void Simulator::PrintVRead(uintptr_t address, unsigned reg_code,
1370 PrintRegisterFormat format, unsigned lane) {
1371 vregisters_[reg_code].NotifyRegisterLogged();
1372
1373 // The template is "# v{code}: 0x{rawbits} <- address".
1374 PrintVRegisterRawHelper(reg_code);
1375 if (format & kPrintRegAsFP) {
1376 PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
1377 GetPrintRegLaneCount(format), lane);
1378 }
1379 fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
1380 clr_memory_address, address, clr_normal);
1381 }
1382
1383 void Simulator::PrintWrite(uintptr_t address, unsigned reg_code,
1384 PrintRegisterFormat format) {
1385 DCHECK_EQ(GetPrintRegLaneCount(format), 1U);
1386
1387 // The template is "# {reg}: 0x{value} -> {address}". To keep the trace tidy
1388 // and readable, the value is aligned with the values in the register trace.
1389 PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
1390 GetPrintRegSizeInBytes(format));
1391 fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
1392 clr_memory_address, address, clr_normal);
1393 }
1394
1395 void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code,
1396 PrintRegisterFormat format, unsigned lane) {
1397 // The templates:
1398 // "# v{code}: 0x{rawbits} -> {address}"
1399 // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}".
1400 // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}"
1401 // Because this trace doesn't represent a change to the source register's
1402 // value, only the relevant part of the value is printed. To keep the trace
1403 // tidy and readable, the raw value is aligned with the other values in the
1404 // register trace.
1405 int lane_count = GetPrintRegLaneCount(format);
1406 int lane_size = GetPrintRegLaneSizeInBytes(format);
1407 int reg_size = GetPrintRegSizeInBytes(format);
1408 PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
1409 if (format & kPrintRegAsFP) {
1410 PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
1411 }
1412 fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
1413 clr_memory_address, address, clr_normal);
1414 }
1415
1416
1417 // Visitors --------------------------------------------------------------------
1418
1419 void Simulator::VisitUnimplemented(Instruction* instr) {
1420 fprintf(stream_, "Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
1421 reinterpret_cast<void*>(instr), instr->InstructionBits());
1422 UNIMPLEMENTED();
1423 }
1424
1425
1426 void Simulator::VisitUnallocated(Instruction* instr) {
1427 fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
1428 reinterpret_cast<void*>(instr), instr->InstructionBits());
1429 UNIMPLEMENTED();
1430 }
1431
1432
1433 void Simulator::VisitPCRelAddressing(Instruction* instr) {
1434 switch (instr->Mask(PCRelAddressingMask)) {
1435 case ADR:
1436 set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
1437 break;
1438 case ADRP: // Not implemented in the assembler.
1439 UNIMPLEMENTED();
1440 break;
1441 default:
1442 UNREACHABLE();
1443 break;
1444 }
1445 }
1446
1447
1448 void Simulator::VisitUnconditionalBranch(Instruction* instr) {
1449 switch (instr->Mask(UnconditionalBranchMask)) {
1450 case BL:
1451 set_lr(instr->following());
1452 V8_FALLTHROUGH;
1453 case B:
1454 set_pc(instr->ImmPCOffsetTarget());
1455 break;
1456 default:
1457 UNREACHABLE();
1458 }
1459 }
1460
1461
1462 void Simulator::VisitConditionalBranch(Instruction* instr) {
1463 DCHECK(instr->Mask(ConditionalBranchMask) == B_cond);
1464 if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
1465 set_pc(instr->ImmPCOffsetTarget());
1466 }
1467 }
1468
1469
1470 void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
1471 Instruction* target = reg<Instruction*>(instr->Rn());
1472 switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
1473 case BLR: {
1474 set_lr(instr->following());
1475 if (instr->Rn() == 31) {
1476 // BLR XZR is used as a guard for the constant pool. We should never hit
1477 // this, but if we do, trap to allow debugging.
1478 Debug();
1479 }
1480 V8_FALLTHROUGH;
1481 }
1482 case BR:
1483 case RET: set_pc(target); break;
1484 default: UNIMPLEMENTED();
1485 }
1486 }
1487
1488
1489 void Simulator::VisitTestBranch(Instruction* instr) {
1490 unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
1491 instr->ImmTestBranchBit40();
1492 bool take_branch = ((xreg(instr->Rt()) & (1UL << bit_pos)) == 0);
1493 switch (instr->Mask(TestBranchMask)) {
1494 case TBZ: break;
1495 case TBNZ: take_branch = !take_branch; break;
1496 default: UNIMPLEMENTED();
1497 }
1498 if (take_branch) {
1499 set_pc(instr->ImmPCOffsetTarget());
1500 }
1501 }
1502
1503
1504 void Simulator::VisitCompareBranch(Instruction* instr) {
1505 unsigned rt = instr->Rt();
1506 bool take_branch = false;
1507 switch (instr->Mask(CompareBranchMask)) {
1508 case CBZ_w: take_branch = (wreg(rt) == 0); break;
1509 case CBZ_x: take_branch = (xreg(rt) == 0); break;
1510 case CBNZ_w: take_branch = (wreg(rt) != 0); break;
1511 case CBNZ_x: take_branch = (xreg(rt) != 0); break;
1512 default: UNIMPLEMENTED();
1513 }
1514 if (take_branch) {
1515 set_pc(instr->ImmPCOffsetTarget());
1516 }
1517 }
1518
1519
1520 template<typename T>
1521 void Simulator::AddSubHelper(Instruction* instr, T op2) {
1522 // Use unsigned types to avoid implementation-defined overflow behaviour.
1523 static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
1524
1525 bool set_flags = instr->FlagsUpdate();
1526 T new_val = 0;
1527 Instr operation = instr->Mask(AddSubOpMask);
1528
1529 switch (operation) {
1530 case ADD:
1531 case ADDS: {
1532 new_val = AddWithCarry<T>(set_flags,
1533 reg<T>(instr->Rn(), instr->RnMode()),
1534 op2);
1535 break;
1536 }
1537 case SUB:
1538 case SUBS: {
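// Subtraction is implemented as Rn + ~op2 + 1, i.e. the two's-complement
// negation of op2 is added with an incoming carry of 1.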
1539 new_val = AddWithCarry<T>(set_flags,
1540 reg<T>(instr->Rn(), instr->RnMode()),
1541 ~op2,
1542 1);
1543 break;
1544 }
1545 default: UNREACHABLE();
1546 }
1547
1548 set_reg<T>(instr->Rd(), new_val, instr->RdMode());
1549 }
1550
1551
1552 void Simulator::VisitAddSubShifted(Instruction* instr) {
1553 Shift shift_type = static_cast<Shift>(instr->ShiftDP());
1554 unsigned shift_amount = instr->ImmDPShift();
1555
1556 if (instr->SixtyFourBits()) {
1557 uint64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
1558 AddSubHelper(instr, op2);
1559 } else {
1560 uint32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
1561 AddSubHelper(instr, op2);
1562 }
1563 }
1564
1565
1566 void Simulator::VisitAddSubImmediate(Instruction* instr) {
1567 int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
1568 if (instr->SixtyFourBits()) {
1569 AddSubHelper(instr, static_cast<uint64_t>(op2));
1570 } else {
1571 AddSubHelper(instr, static_cast<uint32_t>(op2));
1572 }
1573 }
1574
1575
1576 void Simulator::VisitAddSubExtended(Instruction* instr) {
1577 Extend ext = static_cast<Extend>(instr->ExtendMode());
1578 unsigned left_shift = instr->ImmExtendShift();
1579 if (instr->SixtyFourBits()) {
1580 uint64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift);
1581 AddSubHelper(instr, op2);
1582 } else {
1583 uint32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift);
1584 AddSubHelper(instr, op2);
1585 }
1586 }
1587
1588
1589 void Simulator::VisitAddSubWithCarry(Instruction* instr) {
1590 if (instr->SixtyFourBits()) {
1591 AddSubWithCarry<uint64_t>(instr);
1592 } else {
1593 AddSubWithCarry<uint32_t>(instr);
1594 }
1595 }
1596
1597
1598 void Simulator::VisitLogicalShifted(Instruction* instr) {
1599 Shift shift_type = static_cast<Shift>(instr->ShiftDP());
1600 unsigned shift_amount = instr->ImmDPShift();
1601
1602 if (instr->SixtyFourBits()) {
1603 uint64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
1604 op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
1605 LogicalHelper(instr, op2);
1606 } else {
1607 uint32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
1608 op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
1609 LogicalHelper(instr, op2);
1610 }
1611 }
1612
1613
1614 void Simulator::VisitLogicalImmediate(Instruction* instr) {
1615 if (instr->SixtyFourBits()) {
1616 LogicalHelper(instr, static_cast<uint64_t>(instr->ImmLogical()));
1617 } else {
1618 LogicalHelper(instr, static_cast<uint32_t>(instr->ImmLogical()));
1619 }
1620 }
1621
1622
1623 template<typename T>
1624 void Simulator::LogicalHelper(Instruction* instr, T op2) {
1625 T op1 = reg<T>(instr->Rn());
1626 T result = 0;
1627 bool update_flags = false;
1628
1629 // Switch on the logical operation, stripping out the NOT bit, as it has a
1630 // different meaning for logical immediate instructions.
1631 switch (instr->Mask(LogicalOpMask & ~NOT)) {
1632 case ANDS: update_flags = true; V8_FALLTHROUGH;
1633 case AND: result = op1 & op2; break;
1634 case ORR: result = op1 | op2; break;
1635 case EOR: result = op1 ^ op2; break;
1636 default:
1637 UNIMPLEMENTED();
1638 }
1639
1640 if (update_flags) {
1641 nzcv().SetN(CalcNFlag(result));
1642 nzcv().SetZ(CalcZFlag(result));
1643 nzcv().SetC(0);
1644 nzcv().SetV(0);
1645 LogSystemRegister(NZCV);
1646 }
1647
1648 set_reg<T>(instr->Rd(), result, instr->RdMode());
1649 }
1650
1651
1652 void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
1653 if (instr->SixtyFourBits()) {
1654 ConditionalCompareHelper(instr, static_cast<uint64_t>(xreg(instr->Rm())));
1655 } else {
1656 ConditionalCompareHelper(instr, static_cast<uint32_t>(wreg(instr->Rm())));
1657 }
1658 }
1659
1660
1661 void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
1662 if (instr->SixtyFourBits()) {
1663 ConditionalCompareHelper(instr, static_cast<uint64_t>(instr->ImmCondCmp()));
1664 } else {
1665 ConditionalCompareHelper(instr, static_cast<uint32_t>(instr->ImmCondCmp()));
1666 }
1667 }
1668
1669
1670 template<typename T>
1671 void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
1672 // Use unsigned types to avoid implementation-defined overflow behaviour.
1673 static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
1674
1675 T op1 = reg<T>(instr->Rn());
1676
1677 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
1678 // If the condition passes, set the status flags to the result of comparing
1679 // the operands.
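    // CCMP sets the flags as for op1 - op2, computed here as op1 + ~op2 + 1;
    // CCMN sets them as for op1 + op2.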
1680 if (instr->Mask(ConditionalCompareMask) == CCMP) {
1681 AddWithCarry<T>(true, op1, ~op2, 1);
1682 } else {
1683 DCHECK(instr->Mask(ConditionalCompareMask) == CCMN);
1684 AddWithCarry<T>(true, op1, op2, 0);
1685 }
1686 } else {
1687 // If the condition fails, set the status flags to the nzcv immediate.
1688 nzcv().SetFlags(instr->Nzcv());
1689 LogSystemRegister(NZCV);
1690 }
1691 }
1692
1693
1694 void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
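  // The unsigned immediate offset is scaled by the access size.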
1695 int offset = instr->ImmLSUnsigned() << instr->SizeLS();
1696 LoadStoreHelper(instr, offset, Offset);
1697 }
1698
1699
1700 void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
1701 LoadStoreHelper(instr, instr->ImmLS(), Offset);
1702 }
1703
1704
1705 void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
1706 LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
1707 }
1708
1709
1710 void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
1711 LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
1712 }
1713
1714
1715 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
1716 Extend ext = static_cast<Extend>(instr->ExtendMode());
1717 DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
1718 unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
1719
1720 int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount);
1721 LoadStoreHelper(instr, offset, Offset);
1722 }
1723
1724
1725 void Simulator::LoadStoreHelper(Instruction* instr,
1726 int64_t offset,
1727 AddrMode addrmode) {
1728 unsigned srcdst = instr->Rt();
1729 unsigned addr_reg = instr->Rn();
1730 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
1731 uintptr_t stack = 0;
1732
1733 {
1734 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
1735 if (instr->IsLoad()) {
1736 local_monitor_.NotifyLoad();
1737 } else {
1738 local_monitor_.NotifyStore();
1739 global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
1740 }
1741 }
1742
1743 // Handle the writeback for stores before the store. On a CPU the writeback
1744 // and the store are atomic, but when running on the simulator it is possible
1745 // to be interrupted in between. The simulator is not thread safe and V8 does
1746 // not require it to be in order to run JavaScript, so the profiler may sample
1747 // the "simulated" CPU in the middle of load/store with writeback. The code
1748 // below ensures that push operations are safe even when interrupted: the
1749 // stack pointer will be decremented before adding an element to the stack.
1750 if (instr->IsStore()) {
1751 LoadStoreWriteBack(addr_reg, offset, addrmode);
1752
1753 // For a store, the address after writeback is used to check for access
1754 // below the stack.
1755 stack = sp();
1756 }
1757
1758 LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
1759 switch (op) {
1760 // Use _no_log variants to suppress the register trace (LOG_REGS,
1761 // LOG_VREGS). We will print a more detailed log.
1762 case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
1763 case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
1764 case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
1765 case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
1766 case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
1767 case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
1768 case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
1769 case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
1770 case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
1771 case LDR_b:
1772 set_breg_no_log(srcdst, MemoryRead<uint8_t>(address));
1773 break;
1774 case LDR_h:
1775 set_hreg_no_log(srcdst, MemoryRead<uint16_t>(address));
1776 break;
1777 case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
1778 case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
1779 case LDR_q:
1780 set_qreg_no_log(srcdst, MemoryRead<qreg_t>(address));
1781 break;
1782
1783 case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
1784 case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
1785 case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
1786 case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
1787 case STR_b:
1788 MemoryWrite<uint8_t>(address, breg(srcdst));
1789 break;
1790 case STR_h:
1791 MemoryWrite<uint16_t>(address, hreg(srcdst));
1792 break;
1793 case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
1794 case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
1795 case STR_q:
1796 MemoryWrite<qreg_t>(address, qreg(srcdst));
1797 break;
1798
1799 default: UNIMPLEMENTED();
1800 }
1801
1802 // Print a detailed trace (including the memory address) instead of the basic
1803 // register:value trace generated by set_*reg().
1804 unsigned access_size = 1 << instr->SizeLS();
1805 if (instr->IsLoad()) {
1806 if ((op == LDR_s) || (op == LDR_d)) {
1807 LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
1808 } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
1809 LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
1810 } else {
1811 LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
1812 }
1813 } else {
1814 if ((op == STR_s) || (op == STR_d)) {
1815 LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
1816 } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
1817 LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
1818 } else {
1819 LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
1820 }
1821 }
1822
1823 // Handle the writeback for loads after the load to ensure safe pop
1824 // operation even when interrupted in the middle of it. The stack pointer
1825 // is only updated after the load so pop(fp) will never break the invariant
1826 // sp <= fp expected while walking the stack in the sampler.
1827 if (instr->IsLoad()) {
1828 // For a load, the address before writeback is used to check for access
1829 // below the stack.
1830 stack = sp();
1831
1832 LoadStoreWriteBack(addr_reg, offset, addrmode);
1833 }
1834
1835 // Accesses below the stack pointer (but above the platform stack limit) are
1836 // not allowed in the ABI.
1837 CheckMemoryAccess(address, stack);
1838 }
1839
1840
1841 void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
1842 LoadStorePairHelper(instr, Offset);
1843 }
1844
1845
1846 void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
1847 LoadStorePairHelper(instr, PreIndex);
1848 }
1849
1850
1851 void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
1852 LoadStorePairHelper(instr, PostIndex);
1853 }
1854
1855
1856 void Simulator::LoadStorePairHelper(Instruction* instr,
1857 AddrMode addrmode) {
1858 unsigned rt = instr->Rt();
1859 unsigned rt2 = instr->Rt2();
1860 unsigned addr_reg = instr->Rn();
1861 size_t access_size = 1 << instr->SizeLSPair();
1862 int64_t offset = instr->ImmLSPair() * access_size;
1863 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
1864 uintptr_t address2 = address + access_size;
1865 uintptr_t stack = 0;
1866
1867 {
1868 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
1869 if (instr->IsLoad()) {
1870 local_monitor_.NotifyLoad();
1871 } else {
1872 local_monitor_.NotifyStore();
1873 global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
1874 }
1875 }
1876
1877 // Handle the writeback for stores before the store. On a CPU the writeback
1878 // and the store are atomic, but when running on the simulator it is possible
1879 // to be interrupted in between. The simulator is not thread safe and V8 does
1880 // not require it to be in order to run JavaScript, so the profiler may sample
1881 // the "simulated" CPU in the middle of load/store with writeback. The code
1882 // below ensures that push operations are safe even when interrupted: the
1883 // stack pointer will be decremented before adding an element to the stack.
1884 if (instr->IsStore()) {
1885 LoadStoreWriteBack(addr_reg, offset, addrmode);
1886
1887 // For a store, the address after writeback is used to check for access
1888 // below the stack.
1889 stack = sp();
1890 }
1891
1892 LoadStorePairOp op =
1893 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
1894
1895 // 'rt' and 'rt2' can only be aliased for stores.
1896 DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
1897
1898 switch (op) {
1899 // Use _no_log variants to suppress the register trace (LOG_REGS,
1900 // LOG_VREGS). We will print a more detailed log.
1901 case LDP_w: {
1902 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
1903 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
1904 set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
1905 break;
1906 }
1907 case LDP_s: {
1908 DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize));
1909 set_sreg_no_log(rt, MemoryRead<float>(address));
1910 set_sreg_no_log(rt2, MemoryRead<float>(address2));
1911 break;
1912 }
1913 case LDP_x: {
1914 DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize));
1915 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
1916 set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
1917 break;
1918 }
1919 case LDP_d: {
1920 DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize));
1921 set_dreg_no_log(rt, MemoryRead<double>(address));
1922 set_dreg_no_log(rt2, MemoryRead<double>(address2));
1923 break;
1924 }
1925 case LDP_q: {
1926 DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
1927 set_qreg(rt, MemoryRead<qreg_t>(address), NoRegLog);
1928 set_qreg(rt2, MemoryRead<qreg_t>(address2), NoRegLog);
1929 break;
1930 }
1931 case LDPSW_x: {
1932 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
1933 set_xreg_no_log(rt, MemoryRead<int32_t>(address));
1934 set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
1935 break;
1936 }
1937 case STP_w: {
1938 DCHECK_EQ(access_size, static_cast<unsigned>(kWRegSize));
1939 MemoryWrite<uint32_t>(address, wreg(rt));
1940 MemoryWrite<uint32_t>(address2, wreg(rt2));
1941 break;
1942 }
1943 case STP_s: {
1944 DCHECK_EQ(access_size, static_cast<unsigned>(kSRegSize));
1945 MemoryWrite<float>(address, sreg(rt));
1946 MemoryWrite<float>(address2, sreg(rt2));
1947 break;
1948 }
1949 case STP_x: {
1950 DCHECK_EQ(access_size, static_cast<unsigned>(kXRegSize));
1951 MemoryWrite<uint64_t>(address, xreg(rt));
1952 MemoryWrite<uint64_t>(address2, xreg(rt2));
1953 break;
1954 }
1955 case STP_d: {
1956 DCHECK_EQ(access_size, static_cast<unsigned>(kDRegSize));
1957 MemoryWrite<double>(address, dreg(rt));
1958 MemoryWrite<double>(address2, dreg(rt2));
1959 break;
1960 }
1961 case STP_q: {
1962 DCHECK_EQ(access_size, static_cast<unsigned>(kQRegSize));
1963 MemoryWrite<qreg_t>(address, qreg(rt));
1964 MemoryWrite<qreg_t>(address2, qreg(rt2));
1965 break;
1966 }
1967 default: UNREACHABLE();
1968 }
1969
1970 // Print a detailed trace (including the memory address) instead of the basic
1971 // register:value trace generated by set_*reg().
1972 if (instr->IsLoad()) {
1973 if ((op == LDP_s) || (op == LDP_d)) {
1974 LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(access_size));
1975 LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size));
1976 } else if (op == LDP_q) {
1977 LogVRead(address, rt, GetPrintRegisterFormatForSize(access_size));
1978 LogVRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
1979 } else {
1980 LogRead(address, rt, GetPrintRegisterFormatForSize(access_size));
1981 LogRead(address2, rt2, GetPrintRegisterFormatForSize(access_size));
1982 }
1983 } else {
1984 if ((op == STP_s) || (op == STP_d)) {
1985 LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(access_size));
1986 LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size));
1987 } else if (op == STP_q) {
1988 LogVWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
1989 LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
1990 } else {
1991 LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
1992 LogWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size));
1993 }
1994 }
1995
1996 // Handle the writeback for loads after the load to ensure safe pop
1997 // operation even when interrupted in the middle of it. The stack pointer
1998 // is only updated after the load so pop(fp) will never break the invariant
1999 // sp <= fp expected while walking the stack in the sampler.
2000 if (instr->IsLoad()) {
2001 // For a load, the address before writeback is used to check for access
2002 // below the stack.
2003 stack = sp();
2004
2005 LoadStoreWriteBack(addr_reg, offset, addrmode);
2006 }
2007
2008 // Accesses below the stack pointer (but above the platform stack limit) are
2009 // not allowed in the ABI.
2010 CheckMemoryAccess(address, stack);
2011 }
2012
2013
2014 void Simulator::VisitLoadLiteral(Instruction* instr) {
2015 uintptr_t address = instr->LiteralAddress();
2016 unsigned rt = instr->Rt();
2017
2018 {
2019 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
2020 local_monitor_.NotifyLoad();
2021 }
2022
2023 switch (instr->Mask(LoadLiteralMask)) {
2024 // Use _no_log variants to suppress the register trace (LOG_REGS,
2025 // LOG_VREGS), then print a more detailed log.
2026 case LDR_w_lit:
2027 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2028 LogRead(address, rt, kPrintWReg);
2029 break;
2030 case LDR_x_lit:
2031 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
2032 LogRead(address, rt, kPrintXReg);
2033 break;
2034 case LDR_s_lit:
2035 set_sreg_no_log(rt, MemoryRead<float>(address));
2036 LogVRead(address, rt, kPrintSReg);
2037 break;
2038 case LDR_d_lit:
2039 set_dreg_no_log(rt, MemoryRead<double>(address));
2040 LogVRead(address, rt, kPrintDReg);
2041 break;
2042 default: UNREACHABLE();
2043 }
2044 }
2045
2046
2047 uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
2048 AddrMode addrmode) {
2049 const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
2050 uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
2051 if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
2052 // When the base register is SP the stack pointer is required to be
2053 // quadword aligned prior to the address calculation and write-backs.
2054 // Misalignment will cause a stack alignment fault.
2055 FATAL("ALIGNMENT EXCEPTION");
2056 }
2057
2058 if ((addrmode == Offset) || (addrmode == PreIndex)) {
2059 address += offset;
2060 }
2061
2062 return address;
2063 }
2064
2065
2066 void Simulator::LoadStoreWriteBack(unsigned addr_reg,
2067 int64_t offset,
2068 AddrMode addrmode) {
2069 if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
2070 DCHECK_NE(offset, 0);
2071 uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
2072 set_reg(addr_reg, address + offset, Reg31IsStackPointer);
2073 }
2074 }
2075
2076 Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
2077 switch (size) {
2078 case 0:
2079 return TransactionSize::None;
2080 case 1:
2081 return TransactionSize::Byte;
2082 case 2:
2083 return TransactionSize::HalfWord;
2084 case 4:
2085 return TransactionSize::Word;
2086 case 8:
2087 return TransactionSize::DoubleWord;
2088 default:
2089 UNREACHABLE();
2090 }
2091 return TransactionSize::None;
2092 }
2093
2094 void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
2095 unsigned rt = instr->Rt();
2096 unsigned rn = instr->Rn();
2097 LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
2098 instr->Mask(LoadStoreAcquireReleaseMask));
2099 int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
2100 int32_t is_exclusive = (instr->LoadStoreXNotExclusive() == 0);
2101 int32_t is_load = instr->LoadStoreXLoad();
2102 int32_t is_pair = instr->LoadStoreXPair();
2103 USE(is_acquire_release);
2104 USE(is_pair);
2105 DCHECK_NE(is_acquire_release, 0); // Non-acquire/release unimplemented.
2106 DCHECK_EQ(is_pair, 0); // Pair unimplemented.
2107 unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
2108 uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
2109 DCHECK_EQ(address % access_size, 0);
2110 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
2111 if (is_load != 0) {
2112 if (is_exclusive) {
2113 local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
2114 global_monitor_.Pointer()->NotifyLoadExcl_Locked(
2115 address, &global_monitor_processor_);
2116 } else {
2117 local_monitor_.NotifyLoad();
2118 }
2119 switch (op) {
2120 case LDAR_b:
2121 case LDAXR_b:
2122 set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
2123 break;
2124 case LDAR_h:
2125 case LDAXR_h:
2126 set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
2127 break;
2128 case LDAR_w:
2129 case LDAXR_w:
2130 set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
2131 break;
2132 case LDAR_x:
2133 case LDAXR_x:
2134 set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
2135 break;
2136 default:
2137 UNIMPLEMENTED();
2138 }
2139 LogRead(address, rt, GetPrintRegisterFormatForSize(access_size));
2140 } else {
2141 if (is_exclusive) {
2142 unsigned rs = instr->Rs();
2143 DCHECK_NE(rs, rt);
2144 DCHECK_NE(rs, rn);
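      // Rs is the status register of the store-exclusive: it is set to 0 if
      // the exclusive store succeeds, or to 1 if the monitor check fails and
      // no store is performed.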
2145 if (local_monitor_.NotifyStoreExcl(address,
2146 get_transaction_size(access_size)) &&
2147 global_monitor_.Pointer()->NotifyStoreExcl_Locked(
2148 address, &global_monitor_processor_)) {
2149 switch (op) {
2150 case STLXR_b:
2151 MemoryWrite<uint8_t>(address, wreg(rt));
2152 break;
2153 case STLXR_h:
2154 MemoryWrite<uint16_t>(address, wreg(rt));
2155 break;
2156 case STLXR_w:
2157 MemoryWrite<uint32_t>(address, wreg(rt));
2158 break;
2159 case STLXR_x:
2160 MemoryWrite<uint64_t>(address, xreg(rt));
2161 break;
2162 default:
2163 UNIMPLEMENTED();
2164 }
2165 LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size));
2166 set_wreg(rs, 0);
2167 } else {
2168 set_wreg(rs, 1);
2169 }
2170 } else {
2171 local_monitor_.NotifyStore();
2172 global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
2173 switch (op) {
2174 case STLR_b:
2175 MemoryWrite<uint8_t>(address, wreg(rt));
2176 break;
2177 case STLR_h:
2178 MemoryWrite<uint16_t>(address, wreg(rt));
2179 break;
2180 case STLR_w:
2181 MemoryWrite<uint32_t>(address, wreg(rt));
2182 break;
2183 case STLR_x:
2184 MemoryWrite<uint64_t>(address, xreg(rt));
2185 break;
2186 default:
2187 UNIMPLEMENTED();
2188 }
2189 }
2190 }
2191 }
2192
2193 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
2194 if ((address >= stack_limit_) && (address < stack)) {
2195 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
2196 fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n",
2197 static_cast<uint64_t>(stack));
2198 fprintf(stream_, " access was here: 0x%016" PRIx64 "\n",
2199 static_cast<uint64_t>(address));
2200 fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n",
2201 static_cast<uint64_t>(stack_limit_));
2202 fprintf(stream_, "\n");
2203 FATAL("ACCESS BELOW STACK POINTER");
2204 }
2205 }
2206
2207
2208 void Simulator::VisitMoveWideImmediate(Instruction* instr) {
2209 MoveWideImmediateOp mov_op =
2210 static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
2211 int64_t new_xn_val = 0;
2212
2213 bool is_64_bits = instr->SixtyFourBits() == 1;
2214 // Shift is limited for W operations.
2215 DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2));
2216
2217 // Get the shifted immediate.
2218 int64_t shift = instr->ShiftMoveWide() * 16;
2219 int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
2220
2221 // Compute the new value.
2222 switch (mov_op) {
2223 case MOVN_w:
2224 case MOVN_x: {
2225 new_xn_val = ~shifted_imm16;
2226 if (!is_64_bits) new_xn_val &= kWRegMask;
2227 break;
2228 }
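    // MOVK keeps the existing register value and replaces only the 16-bit
    // field at the selected shift position.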
2229 case MOVK_w:
2230 case MOVK_x: {
2231 unsigned reg_code = instr->Rd();
2232 int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
2233 : wreg(reg_code);
2234 new_xn_val = (prev_xn_val & ~(0xFFFFL << shift)) | shifted_imm16;
2235 break;
2236 }
2237 case MOVZ_w:
2238 case MOVZ_x: {
2239 new_xn_val = shifted_imm16;
2240 break;
2241 }
2242 default:
2243 UNREACHABLE();
2244 }
2245
2246 // Update the destination register.
2247 set_xreg(instr->Rd(), new_xn_val);
2248 }
2249
2250
2251 void Simulator::VisitConditionalSelect(Instruction* instr) {
2252 uint64_t new_val = xreg(instr->Rn());
2253 if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
2254 new_val = xreg(instr->Rm());
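    // When the condition fails the result is derived from Rm: CSEL passes it
    // through, CSINC increments it, CSINV inverts it and CSNEG negates it.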
2255 switch (instr->Mask(ConditionalSelectMask)) {
2256 case CSEL_w:
2257 case CSEL_x:
2258 break;
2259 case CSINC_w:
2260 case CSINC_x:
2261 new_val++;
2262 break;
2263 case CSINV_w:
2264 case CSINV_x:
2265 new_val = ~new_val;
2266 break;
2267 case CSNEG_w:
2268 case CSNEG_x:
2269 new_val = -new_val;
2270 break;
2271 default: UNIMPLEMENTED();
2272 }
2273 }
2274 if (instr->SixtyFourBits()) {
2275 set_xreg(instr->Rd(), new_val);
2276 } else {
2277 set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
2278 }
2279 }
2280
2281
2282 void Simulator::VisitDataProcessing1Source(Instruction* instr) {
2283 unsigned dst = instr->Rd();
2284 unsigned src = instr->Rn();
2285
2286 switch (instr->Mask(DataProcessing1SourceMask)) {
2287 case RBIT_w:
2288 set_wreg(dst, base::bits::ReverseBits(wreg(src)));
2289 break;
2290 case RBIT_x:
2291 set_xreg(dst, base::bits::ReverseBits(xreg(src)));
2292 break;
2293 case REV16_w:
2294 set_wreg(dst, ReverseBytes(wreg(src), 1));
2295 break;
2296 case REV16_x:
2297 set_xreg(dst, ReverseBytes(xreg(src), 1));
2298 break;
2299 case REV_w:
2300 set_wreg(dst, ReverseBytes(wreg(src), 2));
2301 break;
2302 case REV32_x:
2303 set_xreg(dst, ReverseBytes(xreg(src), 2));
2304 break;
2305 case REV_x:
2306 set_xreg(dst, ReverseBytes(xreg(src), 3));
2307 break;
2308 case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
2309 break;
2310 case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
2311 break;
2312 case CLS_w: {
2313 set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
2314 break;
2315 }
2316 case CLS_x: {
2317 set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
2318 break;
2319 }
2320 default: UNIMPLEMENTED();
2321 }
2322 }
2323
2324
2325 template <typename T>
2326 void Simulator::DataProcessing2Source(Instruction* instr) {
2327 Shift shift_op = NO_SHIFT;
2328 T result = 0;
2329 switch (instr->Mask(DataProcessing2SourceMask)) {
2330 case SDIV_w:
2331 case SDIV_x: {
2332 T rn = reg<T>(instr->Rn());
2333 T rm = reg<T>(instr->Rm());
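      // Signed division of the minimum representable value by -1 would
      // overflow; the architecture defines the result to be the minimum
      // value itself.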
2334 if ((rn == std::numeric_limits<T>::min()) && (rm == -1)) {
2335 result = std::numeric_limits<T>::min();
2336 } else if (rm == 0) {
2337 // Division by zero can be trapped, but not on A-class processors.
2338 result = 0;
2339 } else {
2340 result = rn / rm;
2341 }
2342 break;
2343 }
2344 case UDIV_w:
2345 case UDIV_x: {
2346 typedef typename std::make_unsigned<T>::type unsignedT;
2347 unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn()));
2348 unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm()));
2349 if (rm == 0) {
2350 // Division by zero can be trapped, but not on A-class processors.
2351 result = 0;
2352 } else {
2353 result = rn / rm;
2354 }
2355 break;
2356 }
2357 case LSLV_w:
2358 case LSLV_x: shift_op = LSL; break;
2359 case LSRV_w:
2360 case LSRV_x: shift_op = LSR; break;
2361 case ASRV_w:
2362 case ASRV_x: shift_op = ASR; break;
2363 case RORV_w:
2364 case RORV_x: shift_op = ROR; break;
2365 default: UNIMPLEMENTED();
2366 }
2367
2368 if (shift_op != NO_SHIFT) {
2369 // Shift distance encoded in the least-significant five/six bits of the
2370 // register.
2371 unsigned shift = wreg(instr->Rm());
2372 if (sizeof(T) == kWRegSize) {
2373 shift &= kShiftAmountWRegMask;
2374 } else {
2375 shift &= kShiftAmountXRegMask;
2376 }
2377 result = ShiftOperand(reg<T>(instr->Rn()), shift_op, shift);
2378 }
2379 set_reg<T>(instr->Rd(), result);
2380 }
2381
2382
2383 void Simulator::VisitDataProcessing2Source(Instruction* instr) {
2384 if (instr->SixtyFourBits()) {
2385 DataProcessing2Source<int64_t>(instr);
2386 } else {
2387 DataProcessing2Source<int32_t>(instr);
2388 }
2389 }
2390
2391
2392 // The algorithm used is described in section 8.2 of
2393 // Hacker's Delight, by Henry S. Warren, Jr.
2394 // It assumes that a right shift on a signed integer is an arithmetic shift.
2395 static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
2396 uint64_t u0, v0, w0;
2397 int64_t u1, v1, w1, w2, t;
2398
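  // Split each operand into 32-bit halves: the low halves are treated as
  // unsigned, the high halves keep the sign of the operand.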
2399 u0 = u & 0xFFFFFFFFL;
2400 u1 = u >> 32;
2401 v0 = v & 0xFFFFFFFFL;
2402 v1 = v >> 32;
2403
2404 w0 = u0 * v0;
2405 t = u1 * v0 + (w0 >> 32);
2406 w1 = t & 0xFFFFFFFFL;
2407 w2 = t >> 32;
2408 w1 = u0 * v1 + w1;
2409
2410 return u1 * v1 + w2 + (w1 >> 32);
2411 }
2412
2413
2414 void Simulator::VisitDataProcessing3Source(Instruction* instr) {
2415 int64_t result = 0;
2416 // Extract and sign- or zero-extend 32-bit arguments for widening operations.
2417 uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
2418 uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
2419 int64_t rn_s32 = reg<int32_t>(instr->Rn());
2420 int64_t rm_s32 = reg<int32_t>(instr->Rm());
2421 switch (instr->Mask(DataProcessing3SourceMask)) {
2422 case MADD_w:
2423 case MADD_x:
2424 result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
2425 break;
2426 case MSUB_w:
2427 case MSUB_x:
2428 result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
2429 break;
2430 case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
2431 case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
2432 case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
2433 case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
2434 case SMULH_x:
2435 DCHECK_EQ(instr->Ra(), kZeroRegCode);
2436 result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
2437 break;
2438 default: UNIMPLEMENTED();
2439 }
2440
2441 if (instr->SixtyFourBits()) {
2442 set_xreg(instr->Rd(), result);
2443 } else {
2444 set_wreg(instr->Rd(), static_cast<int32_t>(result));
2445 }
2446 }
2447
2448
2449 template <typename T>
2450 void Simulator::BitfieldHelper(Instruction* instr) {
2451 typedef typename std::make_unsigned<T>::type unsignedT;
2452 T reg_size = sizeof(T) * 8;
2453 T R = instr->ImmR();
2454 T S = instr->ImmS();
2455 T diff = S - R;
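  // R is the rotate amount and S the index of the most significant bit of the
  // source bitfield. A non-negative diff selects the extract-to-LSB forms
  // (UBFX/SBFX/BFXIL); a negative diff selects the insert-from-LSB forms
  // (BFI/UBFIZ/SBFIZ), which need a rotated mask.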
2456 T mask;
2457 if (diff >= 0) {
2458 mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
2459 : static_cast<T>(-1);
2460 } else {
2461 uint64_t umask = ((1L << (S + 1)) - 1);
2462 umask = (umask >> R) | (umask << (reg_size - R));
2463 mask = static_cast<T>(umask);
2464 diff += reg_size;
2465 }
2466
2467 // inzero indicates whether the extracted bitfield is inserted into the
2468 // existing destination register value or into zero.
2469 // If extend is true, the sign of the extracted bitfield is extended.
2470 bool inzero = false;
2471 bool extend = false;
2472 switch (instr->Mask(BitfieldMask)) {
2473 case BFM_x:
2474 case BFM_w:
2475 break;
2476 case SBFM_x:
2477 case SBFM_w:
2478 inzero = true;
2479 extend = true;
2480 break;
2481 case UBFM_x:
2482 case UBFM_w:
2483 inzero = true;
2484 break;
2485 default:
2486 UNIMPLEMENTED();
2487 }
2488
2489 T dst = inzero ? 0 : reg<T>(instr->Rd());
2490 T src = reg<T>(instr->Rn());
2491 // Rotate source bitfield into place.
2492 T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R));
2493 // Determine the sign extension.
2494 T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1;
2495 T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
2496 << (diff + 1);
2497
2498 // Merge sign extension, dest/zero and bitfield.
2499 result = signbits | (result & mask) | (dst & ~mask);
2500
2501 set_reg<T>(instr->Rd(), result);
2502 }
2503
2504
2505 void Simulator::VisitBitfield(Instruction* instr) {
2506 if (instr->SixtyFourBits()) {
2507 BitfieldHelper<int64_t>(instr);
2508 } else {
2509 BitfieldHelper<int32_t>(instr);
2510 }
2511 }
2512
2513
2514 void Simulator::VisitExtract(Instruction* instr) {
2515 if (instr->SixtyFourBits()) {
2516 Extract<uint64_t>(instr);
2517 } else {
2518 Extract<uint32_t>(instr);
2519 }
2520 }
2521
2522
2523 void Simulator::VisitFPImmediate(Instruction* instr) {
2524 AssertSupportedFPCR();
2525
2526 unsigned dest = instr->Rd();
2527 switch (instr->Mask(FPImmediateMask)) {
2528 case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
2529 case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
2530 default: UNREACHABLE();
2531 }
2532 }
2533
2534
2535 void Simulator::VisitFPIntegerConvert(Instruction* instr) {
2536 AssertSupportedFPCR();
2537
2538 unsigned dst = instr->Rd();
2539 unsigned src = instr->Rn();
2540
2541 FPRounding round = fpcr().RMode();
2542
2543 switch (instr->Mask(FPIntegerConvertMask)) {
2544 case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
2545 case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
2546 case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
2547 case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
2548 case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
2549 case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
2550 case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
2551 case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
2552 case FCVTMS_ws:
2553 set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
2554 break;
2555 case FCVTMS_xs:
2556 set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
2557 break;
2558 case FCVTMS_wd:
2559 set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
2560 break;
2561 case FCVTMS_xd:
2562 set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
2563 break;
2564 case FCVTMU_ws:
2565 set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
2566 break;
2567 case FCVTMU_xs:
2568 set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
2569 break;
2570 case FCVTMU_wd:
2571 set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
2572 break;
2573 case FCVTMU_xd:
2574 set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
2575 break;
2576 case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
2577 case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
2578 case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
2579 case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
2580 case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
2581 case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
2582 case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
2583 case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
2584 case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
2585 case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
2586 case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
2587 case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
2588 case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
2589 case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
2590 case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
2591 case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
2592 case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
2593 case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
2594 case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
2595 case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
2596
2597 // A 32-bit input can be handled in the same way as a 64-bit input, since
2598 // the sign- or zero-extension will not affect the conversion.
2599 case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
2600 case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
2601 case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
2602 case UCVTF_dw: {
2603 set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
2604 break;
2605 }
2606 case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
2607 case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
2608 case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
2609 case UCVTF_sw: {
2610 set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
2611 break;
2612 }
2613
2614 default: UNREACHABLE();
2615 }
2616 }
2617
2618
2619 void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
2620 AssertSupportedFPCR();
2621
2622 unsigned dst = instr->Rd();
2623 unsigned src = instr->Rn();
2624 int fbits = 64 - instr->FPScale();
2625
2626 FPRounding round = fpcr().RMode();
2627
2628 switch (instr->Mask(FPFixedPointConvertMask)) {
2629 // A 32-bit input can be handled in the same way as a 64-bit input, since
2630 // the sign- or zero-extension will not affect the conversion.
2631 case SCVTF_dx_fixed:
2632 set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
2633 break;
2634 case SCVTF_dw_fixed:
2635 set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
2636 break;
2637 case UCVTF_dx_fixed:
2638 set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
2639 break;
2640 case UCVTF_dw_fixed: {
2641 set_dreg(dst,
2642 UFixedToDouble(reg<uint32_t>(src), fbits, round));
2643 break;
2644 }
2645 case SCVTF_sx_fixed:
2646 set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
2647 break;
2648 case SCVTF_sw_fixed:
2649 set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
2650 break;
2651 case UCVTF_sx_fixed:
2652 set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
2653 break;
2654 case UCVTF_sw_fixed: {
2655 set_sreg(dst,
2656 UFixedToFloat(reg<uint32_t>(src), fbits, round));
2657 break;
2658 }
2659 default: UNREACHABLE();
2660 }
2661 }
2662
2663
2664 void Simulator::VisitFPCompare(Instruction* instr) {
2665 AssertSupportedFPCR();
2666
2667 switch (instr->Mask(FPCompareMask)) {
2668 case FCMP_s:
2669 FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
2670 break;
2671 case FCMP_d:
2672 FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
2673 break;
2674 case FCMP_s_zero:
2675 FPCompare(sreg(instr->Rn()), 0.0f);
2676 break;
2677 case FCMP_d_zero:
2678 FPCompare(dreg(instr->Rn()), 0.0);
2679 break;
2680 default: UNIMPLEMENTED();
2681 }
2682 }
2683
2684
2685 void Simulator::VisitFPConditionalCompare(Instruction* instr) {
2686 AssertSupportedFPCR();
2687
2688 switch (instr->Mask(FPConditionalCompareMask)) {
2689 case FCCMP_s:
2690 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2691 FPCompare(sreg(instr->Rn()), sreg(instr->Rm()));
2692 } else {
2693 nzcv().SetFlags(instr->Nzcv());
2694 LogSystemRegister(NZCV);
2695 }
2696 break;
2697 case FCCMP_d: {
2698 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2699 FPCompare(dreg(instr->Rn()), dreg(instr->Rm()));
2700 } else {
2701 // If the condition fails, set the status flags to the nzcv immediate.
2702 nzcv().SetFlags(instr->Nzcv());
2703 LogSystemRegister(NZCV);
2704 }
2705 break;
2706 }
2707 default: UNIMPLEMENTED();
2708 }
2709 }
2710
2711
2712 void Simulator::VisitFPConditionalSelect(Instruction* instr) {
2713 AssertSupportedFPCR();
2714
2715 Instr selected;
2716 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
2717 selected = instr->Rn();
2718 } else {
2719 selected = instr->Rm();
2720 }
2721
2722 switch (instr->Mask(FPConditionalSelectMask)) {
2723 case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
2724 case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
2725 default: UNIMPLEMENTED();
2726 }
2727 }
2728
2729
2730 void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
2731 AssertSupportedFPCR();
2732
2733 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
2734 VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
2735 SimVRegister& rd = vreg(instr->Rd());
2736 SimVRegister& rn = vreg(instr->Rn());
2737 bool inexact_exception = false;
2738
2739 unsigned fd = instr->Rd();
2740 unsigned fn = instr->Rn();
2741
2742 switch (instr->Mask(FPDataProcessing1SourceMask)) {
2743 case FMOV_s:
2744 set_sreg(fd, sreg(fn));
2745 return;
2746 case FMOV_d:
2747 set_dreg(fd, dreg(fn));
2748 return;
2749 case FABS_s:
2750 case FABS_d:
2751 fabs_(vform, vreg(fd), vreg(fn));
2752 // Explicitly log the register update whilst we have type information.
2753 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2754 return;
2755 case FNEG_s:
2756 case FNEG_d:
2757 fneg(vform, vreg(fd), vreg(fn));
2758 // Explicitly log the register update whilst we have type information.
2759 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2760 return;
2761 case FCVT_ds:
2762 set_dreg(fd, FPToDouble(sreg(fn)));
2763 return;
2764 case FCVT_sd:
2765 set_sreg(fd, FPToFloat(dreg(fn), FPTieEven));
2766 return;
2767 case FCVT_hs:
2768 set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven));
2769 return;
2770 case FCVT_sh:
2771 set_sreg(fd, FPToFloat(hreg(fn)));
2772 return;
2773 case FCVT_dh:
2774 set_dreg(fd, FPToDouble(FPToFloat(hreg(fn))));
2775 return;
2776 case FCVT_hd:
2777 set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven));
2778 return;
2779 case FSQRT_s:
2780 case FSQRT_d:
2781 fsqrt(vform, rd, rn);
2782 // Explicitly log the register update whilst we have type information.
2783 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2784 return;
2785 case FRINTI_s:
2786 case FRINTI_d:
2787 break; // Use FPCR rounding mode.
2788 case FRINTX_s:
2789 case FRINTX_d:
2790 inexact_exception = true;
2791 break;
2792 case FRINTA_s:
2793 case FRINTA_d:
2794 fpcr_rounding = FPTieAway;
2795 break;
2796 case FRINTM_s:
2797 case FRINTM_d:
2798 fpcr_rounding = FPNegativeInfinity;
2799 break;
2800 case FRINTN_s:
2801 case FRINTN_d:
2802 fpcr_rounding = FPTieEven;
2803 break;
2804 case FRINTP_s:
2805 case FRINTP_d:
2806 fpcr_rounding = FPPositiveInfinity;
2807 break;
2808 case FRINTZ_s:
2809 case FRINTZ_d:
2810 fpcr_rounding = FPZero;
2811 break;
2812 default:
2813 UNIMPLEMENTED();
2814 }
2815
2816 // Only FRINT* instructions fall through the switch above.
2817 frint(vform, rd, rn, fpcr_rounding, inexact_exception);
2818 // Explicitly log the register update whilst we have type information.
2819 LogVRegister(fd, GetPrintRegisterFormatFP(vform));
2820 }
2821
2822 void Simulator::VisitFPDataProcessing2Source(Instruction* instr) {
2823 AssertSupportedFPCR();
2824
2825 VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
2826 SimVRegister& rd = vreg(instr->Rd());
2827 SimVRegister& rn = vreg(instr->Rn());
2828 SimVRegister& rm = vreg(instr->Rm());
2829
2830 switch (instr->Mask(FPDataProcessing2SourceMask)) {
2831 case FADD_s:
2832 case FADD_d:
2833 fadd(vform, rd, rn, rm);
2834 break;
2835 case FSUB_s:
2836 case FSUB_d:
2837 fsub(vform, rd, rn, rm);
2838 break;
2839 case FMUL_s:
2840 case FMUL_d:
2841 fmul(vform, rd, rn, rm);
2842 break;
2843 case FNMUL_s:
2844 case FNMUL_d:
2845 fnmul(vform, rd, rn, rm);
2846 break;
2847 case FDIV_s:
2848 case FDIV_d:
2849 fdiv(vform, rd, rn, rm);
2850 break;
2851 case FMAX_s:
2852 case FMAX_d:
2853 fmax(vform, rd, rn, rm);
2854 break;
2855 case FMIN_s:
2856 case FMIN_d:
2857 fmin(vform, rd, rn, rm);
2858 break;
2859 case FMAXNM_s:
2860 case FMAXNM_d:
2861 fmaxnm(vform, rd, rn, rm);
2862 break;
2863 case FMINNM_s:
2864 case FMINNM_d:
2865 fminnm(vform, rd, rn, rm);
2866 break;
2867 default:
2868 UNREACHABLE();
2869 }
2870 // Explicitly log the register update whilst we have type information.
2871 LogVRegister(instr->Rd(), GetPrintRegisterFormatFP(vform));
2872 }
2873
2874 void Simulator::VisitFPDataProcessing3Source(Instruction* instr) {
2875 AssertSupportedFPCR();
2876
2877 unsigned fd = instr->Rd();
2878 unsigned fn = instr->Rn();
2879 unsigned fm = instr->Rm();
2880 unsigned fa = instr->Ra();
2881
2882 switch (instr->Mask(FPDataProcessing3SourceMask)) {
2883 // fd = fa +/- (fn * fm)
2884 case FMADD_s:
2885 set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm)));
2886 break;
2887 case FMSUB_s:
2888 set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm)));
2889 break;
2890 case FMADD_d:
2891 set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm)));
2892 break;
2893 case FMSUB_d:
2894 set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm)));
2895 break;
2896 // Negated variants of the above.
2897 case FNMADD_s:
2898 set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
2899 break;
2900 case FNMSUB_s:
2901 set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
2902 break;
2903 case FNMADD_d:
2904 set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
2905 break;
2906 case FNMSUB_d:
2907 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
2908 break;
2909 default:
2910 UNIMPLEMENTED();
2911 }
2912 }
2913
2914 bool Simulator::FPProcessNaNs(Instruction* instr) {
2915 unsigned fd = instr->Rd();
2916 unsigned fn = instr->Rn();
2917 unsigned fm = instr->Rm();
2918 bool done = false;
2919
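  // If either input is NaN, the appropriate quiet NaN is written to the
  // destination and the caller is told that the operation is already done.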
2920 if (instr->Mask(FP64) == FP64) {
2921 double result = FPProcessNaNs(dreg(fn), dreg(fm));
2922 if (std::isnan(result)) {
2923 set_dreg(fd, result);
2924 done = true;
2925 }
2926 } else {
2927 float result = FPProcessNaNs(sreg(fn), sreg(fm));
2928 if (std::isnan(result)) {
2929 set_sreg(fd, result);
2930 done = true;
2931 }
2932 }
2933
2934 return done;
2935 }
2936
2937
2938 void Simulator::VisitSystem(Instruction* instr) {
2939 // Some system instructions hijack their Op and Cp fields to represent a
2940 // range of immediates instead of indicating a different instruction. This
2941 // makes the decoding tricky.
2942 if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
2943 switch (instr->Mask(SystemSysRegMask)) {
2944 case MRS: {
2945 switch (instr->ImmSystemRegister()) {
2946 case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
2947 case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
2948 default: UNIMPLEMENTED();
2949 }
2950 break;
2951 }
2952 case MSR: {
2953 switch (instr->ImmSystemRegister()) {
2954 case NZCV:
2955 nzcv().SetRawValue(wreg(instr->Rt()));
2956 LogSystemRegister(NZCV);
2957 break;
2958 case FPCR:
2959 fpcr().SetRawValue(wreg(instr->Rt()));
2960 LogSystemRegister(FPCR);
2961 break;
2962 default: UNIMPLEMENTED();
2963 }
2964 break;
2965 }
2966 }
2967 } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
2968 DCHECK(instr->Mask(SystemHintMask) == HINT);
2969 switch (instr->ImmHint()) {
2970 case NOP:
2971 case CSDB:
2972 break;
2973 default: UNIMPLEMENTED();
2974 }
2975 } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
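    // Barrier instructions are modelled conservatively with a full host
    // memory barrier.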
2976 __sync_synchronize();
2977 } else {
2978 UNIMPLEMENTED();
2979 }
2980 }
2981
2982
2983 bool Simulator::GetValue(const char* desc, int64_t* value) {
2984 int regnum = CodeFromName(desc);
2985 if (regnum >= 0) {
2986 unsigned code = regnum;
2987 if (code == kZeroRegCode) {
2988 // Catch the zero register and return 0.
2989 *value = 0;
2990 return true;
2991 } else if (code == kSPRegInternalCode) {
2992 // Translate the stack pointer code to 31, for Reg31IsStackPointer.
2993 code = 31;
2994 }
2995 if (desc[0] == 'w') {
2996 *value = wreg(code, Reg31IsStackPointer);
2997 } else {
2998 *value = xreg(code, Reg31IsStackPointer);
2999 }
3000 return true;
3001 } else if (strncmp(desc, "0x", 2) == 0) {
3002 return SScanF(desc + 2, "%" SCNx64,
3003 reinterpret_cast<uint64_t*>(value)) == 1;
3004 } else {
3005 return SScanF(desc, "%" SCNu64,
3006 reinterpret_cast<uint64_t*>(value)) == 1;
3007 }
3008 }
3009
3010
3011 bool Simulator::PrintValue(const char* desc) {
3012 if (strcmp(desc, "sp") == 0) {
3013 DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
3014 PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
3015 clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
3016 return true;
3017 } else if (strcmp(desc, "wsp") == 0) {
3018 DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
3019 PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
3020 clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
3021 return true;
3022 }
3023
3024 int i = CodeFromName(desc);
3025 static_assert(kNumberOfRegisters == kNumberOfVRegisters,
3026 "Must be same number of Registers as VRegisters.");
3027 if (i < 0 || static_cast<unsigned>(i) >= kNumberOfVRegisters) return false;
3028
3029 if (desc[0] == 'v') {
3030 PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
3031 clr_vreg_name, VRegNameForCode(i), clr_vreg_value,
3032 bit_cast<uint64_t>(dreg(i)), clr_normal, clr_vreg_name,
3033 DRegNameForCode(i), clr_vreg_value, dreg(i), clr_vreg_name,
3034 SRegNameForCode(i), clr_vreg_value, sreg(i), clr_normal);
3035 return true;
3036 } else if (desc[0] == 'd') {
3037 PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, DRegNameForCode(i),
3038 clr_vreg_value, dreg(i), clr_normal);
3039 return true;
3040 } else if (desc[0] == 's') {
3041 PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, SRegNameForCode(i),
3042 clr_vreg_value, sreg(i), clr_normal);
3043 return true;
3044 } else if (desc[0] == 'w') {
3045 PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n",
3046 clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
3047 return true;
3048 } else {
3049 // X registers and their aliases start with a wide variety of characters, so
3050 // anything not handled above is treated as an X register.
3051 PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n",
3052 clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
3053 return true;
3054 }
3055 }
3056
3057
3058 void Simulator::Debug() {
3059 #define COMMAND_SIZE 63
3060 #define ARG_SIZE 255
3061
3062 #define STR(a) #a
3063 #define XSTR(a) STR(a)
3064
3065 char cmd[COMMAND_SIZE + 1];
3066 char arg1[ARG_SIZE + 1];
3067 char arg2[ARG_SIZE + 1];
3068 char* argv[3] = { cmd, arg1, arg2 };
3069
3070 // Make sure to have a proper terminating character if reaching the limit.
3071 cmd[COMMAND_SIZE] = 0;
3072 arg1[ARG_SIZE] = 0;
3073 arg2[ARG_SIZE] = 0;
3074
3075 bool done = false;
3076 bool cleared_log_disasm_bit = false;
3077
3078 while (!done) {
3079 // Disassemble the next instruction to execute before doing anything else.
3080 PrintInstructionsAt(pc_, 1);
3081 // Read the command line.
3082 char* line = ReadLine("sim> ");
3083 if (line == nullptr) {
3084 break;
3085 } else {
3086 // Repeat last command by default.
3087 char* last_input = last_debugger_input();
3088 if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
3089 DeleteArray(line);
3090 line = last_input;
3091 } else {
3092 // Update the latest command run.
3093 set_last_debugger_input(line);
3094 }
3095
3096 // Use sscanf to parse the individual parts of the command line. At the
3097 // moment no command expects more than two parameters.
3098 int argc = SScanF(line,
3099 "%" XSTR(COMMAND_SIZE) "s "
3100 "%" XSTR(ARG_SIZE) "s "
3101 "%" XSTR(ARG_SIZE) "s",
3102 cmd, arg1, arg2);
3103
3104 // stepi / si ------------------------------------------------------------
3105 if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
3106 // We are about to execute instructions, after which by default we
3107 // should increment the pc_. If pc_modified_ was set when reaching this
3108 // debug instruction, it has not been cleared because this instruction
3109 // has not completed yet, so clear it manually.
3110 pc_modified_ = false;
3111
3112 if (argc == 1) {
3113 ExecuteInstruction();
3114 } else {
3115 int64_t number_of_instructions_to_execute = 1;
3116 GetValue(arg1, &number_of_instructions_to_execute);
3117
3118 set_log_parameters(log_parameters() | LOG_DISASM);
3119 while (number_of_instructions_to_execute-- > 0) {
3120 ExecuteInstruction();
3121 }
3122 set_log_parameters(log_parameters() & ~LOG_DISASM);
3123 PrintF("\n");
3124 }
3125
3126 // If it was necessary, the pc has already been updated or incremented
3127 // when executing the instruction. So we do not want it to be updated
3128 // again. It will be cleared when exiting.
3129 pc_modified_ = true;
3130
3131 // next / n --------------------------------------------------------------
3132 } else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
3133 // Tell the simulator to break after the next executed BL.
3134 break_on_next_ = true;
3135 // Continue.
3136 done = true;
3137
3138 // continue / cont / c ---------------------------------------------------
3139 } else if ((strcmp(cmd, "continue") == 0) ||
3140 (strcmp(cmd, "cont") == 0) ||
3141 (strcmp(cmd, "c") == 0)) {
3142 // Leave the debugger shell.
3143 done = true;
3144
3145 // disassemble / disasm / di ---------------------------------------------
3146 } else if (strcmp(cmd, "disassemble") == 0 ||
3147 strcmp(cmd, "disasm") == 0 ||
3148 strcmp(cmd, "di") == 0) {
3149 int64_t n_of_instrs_to_disasm = 10; // default value.
3150 int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
3151 if (argc >= 2) { // disasm <n of instrs>
3152 GetValue(arg1, &n_of_instrs_to_disasm);
3153 }
3154 if (argc >= 3) { // disasm <n of instrs> <address>
3155 GetValue(arg2, &address);
3156 }
3157
3158 // Disassemble.
3159 PrintInstructionsAt(reinterpret_cast<Instruction*>(address),
3160 n_of_instrs_to_disasm);
3161 PrintF("\n");
3162
3163 // print / p -------------------------------------------------------------
3164 } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
3165 if (argc == 2) {
3166 if (strcmp(arg1, "all") == 0) {
3167 PrintRegisters();
3168 PrintVRegisters();
3169 } else {
3170 if (!PrintValue(arg1)) {
3171 PrintF("%s unrecognized\n", arg1);
3172 }
3173 }
3174 } else {
3175 PrintF(
3176 "print <register>\n"
3177 " Print the content of a register. (alias 'p')\n"
3178 " 'print all' will print all registers.\n"
3179 " Use 'printobject' to get more details about the value.\n");
3180 }
3181
3182 // printobject / po ------------------------------------------------------
3183 } else if ((strcmp(cmd, "printobject") == 0) ||
3184 (strcmp(cmd, "po") == 0)) {
3185 if (argc == 2) {
3186 int64_t value;
3187 StdoutStream os;
3188 if (GetValue(arg1, &value)) {
3189 Object* obj = reinterpret_cast<Object*>(value);
3190 os << arg1 << ": \n";
3191 #ifdef DEBUG
3192 obj->Print(os);
3193 os << "\n";
3194 #else
3195 os << Brief(obj) << "\n";
3196 #endif
3197 } else {
3198 os << arg1 << " unrecognized\n";
3199 }
3200 } else {
3201 PrintF("printobject <value>\n"
3202 "printobject <register>\n"
3203 " Print details about the value. (alias 'po')\n");
3204 }
3205
3206 // stack / mem ----------------------------------------------------------
3207 } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
3208 int64_t* cur = nullptr;
3209 int64_t* end = nullptr;
3210 int next_arg = 1;
3211
3212 if (strcmp(cmd, "stack") == 0) {
3213 cur = reinterpret_cast<int64_t*>(sp());
3214
3215 } else { // "mem"
3216 int64_t value;
3217 if (!GetValue(arg1, &value)) {
3218 PrintF("%s unrecognized\n", arg1);
3219 continue;
3220 }
3221 cur = reinterpret_cast<int64_t*>(value);
3222 next_arg++;
3223 }
3224
3225 int64_t words = 0;
3226 if (argc == next_arg) {
3227 words = 10;
3228 } else if (argc == next_arg + 1) {
3229 if (!GetValue(argv[next_arg], &words)) {
3230 PrintF("%s unrecognized\n", argv[next_arg]);
3231 PrintF("Printing 10 double words by default");
3232 words = 10;
3233 }
3234 } else {
3235 UNREACHABLE();
3236 }
3237 end = cur + words;
3238
3239 while (cur < end) {
3240 PrintF(" 0x%016" PRIx64 ": 0x%016" PRIx64 " %10" PRId64,
3241 reinterpret_cast<uint64_t>(cur), *cur, *cur);
3242 HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
3243 int64_t value = *cur;
3244 Heap* current_heap = isolate_->heap();
3245 if (((value & 1) == 0) ||
3246 current_heap->ContainsSlow(obj->address())) {
3247 PrintF(" (");
3248 if ((value & kSmiTagMask) == 0) {
3249 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
3250 int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
3251 PrintF("smi %" PRId32, untagged);
3252 } else {
3253 obj->ShortPrint();
3254 }
3255 PrintF(")");
3256 }
3257 PrintF("\n");
3258 cur++;
3259 }
3260
3261 // trace / t -------------------------------------------------------------
3262 } else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
3263 if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
3264 (LOG_DISASM | LOG_REGS)) {
3265 PrintF("Enabling disassembly and registers tracing\n");
3266 set_log_parameters(log_parameters() | LOG_DISASM | LOG_REGS);
3267 } else {
3268 PrintF("Disabling disassembly and registers tracing\n");
3269 set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
3270 }
3271
3272 // break / b -------------------------------------------------------------
3273 } else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
3274 if (argc == 2) {
3275 int64_t value;
3276 if (GetValue(arg1, &value)) {
3277 SetBreakpoint(reinterpret_cast<Instruction*>(value));
3278 } else {
3279 PrintF("%s unrecognized\n", arg1);
3280 }
3281 } else {
3282 ListBreakpoints();
3283 PrintF("Use `break <address>` to set or disable a breakpoint\n");
3284 }
3285
3286 // gdb -------------------------------------------------------------------
3287 } else if (strcmp(cmd, "gdb") == 0) {
3288 PrintF("Relinquishing control to gdb.\n");
3289 base::OS::DebugBreak();
3290 PrintF("Regaining control from gdb.\n");
3291
3292 // sysregs ---------------------------------------------------------------
3293 } else if (strcmp(cmd, "sysregs") == 0) {
3294 PrintSystemRegisters();
3295
3296 // help / h --------------------------------------------------------------
3297 } else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
3298 PrintF(
3299 "stepi / si\n"
3300 " stepi <n>\n"
3301 " Step <n> instructions.\n"
3302 "next / n\n"
3303 " Continue execution until a BL instruction is reached.\n"
3304 " At this point a breakpoint is set just after this BL.\n"
3305 " Then execution is resumed. It will probably later hit the\n"
3306 " breakpoint just set.\n"
3307 "continue / cont / c\n"
3308 " Continue execution from here.\n"
3309 "disassemble / disasm / di\n"
3310 " disassemble <n> <address>\n"
3311 " Disassemble <n> instructions from current <address>.\n"
3312 " By default <n> is 20 and <address> is the current pc.\n"
3313 "print / p\n"
3314 " print <register>\n"
3315 " Print the content of a register.\n"
3316 " 'print all' will print all registers.\n"
3317 " Use 'printobject' to get more details about the value.\n"
3318 "printobject / po\n"
3319 " printobject <value>\n"
3320 " printobject <register>\n"
3321 " Print details about the value.\n"
3322 "stack\n"
3323 " stack [<words>]\n"
3324           "  Dump stack content, 10 words by default\n"
3325 "mem\n"
3326 " mem <address> [<words>]\n"
3327           "  Dump memory content, 10 words by default\n"
3328 "trace / t\n"
3329 " Toggle disassembly and register tracing\n"
3330 "break / b\n"
3331 " break : list all breakpoints\n"
3332 " break <address> : set / enable / disable a breakpoint.\n"
3333 "gdb\n"
3334 " Enter gdb.\n"
3335 "sysregs\n"
3336 " Print all system registers (including NZCV).\n");
3337 } else {
3338 PrintF("Unknown command: %s\n", cmd);
3339 PrintF("Use 'help' for more information.\n");
3340 }
3341 }
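    // Re-enable LOG_DISASM if it was temporarily cleared while handling the
    // debugger command.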
3342     if (cleared_log_disasm_bit) {
3343 set_log_parameters(log_parameters_ | LOG_DISASM);
3344 }
3345 }
3346 }
3347
3348
3349 void Simulator::VisitException(Instruction* instr) {
3350 switch (instr->Mask(ExceptionMask)) {
3351 case HLT: {
3352 if (instr->ImmException() == kImmExceptionIsDebug) {
3353 // Read the arguments encoded inline in the instruction stream.
3354 uint32_t code;
3355 uint32_t parameters;
3356
3357 memcpy(&code,
3358 pc_->InstructionAtOffset(kDebugCodeOffset),
3359 sizeof(code));
3360               memcpy(&parameters,
3361 pc_->InstructionAtOffset(kDebugParamsOffset),
3362 sizeof(parameters));
3363 char const *message =
3364 reinterpret_cast<char const*>(
3365 pc_->InstructionAtOffset(kDebugMessageOffset));
3366
3367 // Always print something when we hit a debug point that breaks.
3368 // We are going to break, so printing something is not an issue in
3369 // terms of speed.
3370 if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
3371 if (message != nullptr) {
3372 PrintF(stream_,
3373 "# %sDebugger hit %d: %s%s%s\n",
3374 clr_debug_number,
3375 code,
3376 clr_debug_message,
3377 message,
3378 clr_normal);
3379 } else {
3380 PrintF(stream_,
3381 "# %sDebugger hit %d.%s\n",
3382 clr_debug_number,
3383 code,
3384 clr_normal);
3385 }
3386 }
3387
3388 // Other options.
3389 switch (parameters & kDebuggerTracingDirectivesMask) {
3390 case TRACE_ENABLE:
3391 set_log_parameters(log_parameters() | parameters);
3392 if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
3393 if (parameters & LOG_REGS) { PrintRegisters(); }
3394 if (parameters & LOG_VREGS) {
3395 PrintVRegisters();
3396 }
3397 break;
3398 case TRACE_DISABLE:
3399 set_log_parameters(log_parameters() & ~parameters);
3400 break;
3401 case TRACE_OVERRIDE:
3402 set_log_parameters(parameters);
3403 break;
3404 default:
3405 // We don't support a one-shot LOG_DISASM.
3406 DCHECK_EQ(parameters & LOG_DISASM, 0);
3407 // Don't print information that is already being traced.
3408 parameters &= ~log_parameters();
3409 // Print the requested information.
3410 if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
3411 if (parameters & LOG_REGS) PrintRegisters();
3412 if (parameters & LOG_VREGS) PrintVRegisters();
3413 }
3414
3415 // The stop parameters are inlined in the code. Skip them:
3416 // - Skip to the end of the message string.
3417 size_t size = kDebugMessageOffset + strlen(message) + 1;
3418 pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstrSize));
3419 // - Verify that the unreachable marker is present.
3420 DCHECK(pc_->Mask(ExceptionMask) == HLT);
3421 DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);
3422 // - Skip past the unreachable marker.
3423 set_pc(pc_->following());
3424
3425 // Check if the debugger should break.
3426 if (parameters & BREAK) Debug();
3427
3428 } else if (instr->ImmException() == kImmExceptionIsRedirectedCall) {
3429 DoRuntimeCall(instr);
3430 } else if (instr->ImmException() == kImmExceptionIsPrintf) {
3431 DoPrintf(instr);
3432
3433 } else if (instr->ImmException() == kImmExceptionIsUnreachable) {
3434 fprintf(stream_, "Hit UNREACHABLE marker at PC=%p.\n",
3435 reinterpret_cast<void*>(pc_));
3436 abort();
3437
3438 } else {
3439 base::OS::DebugBreak();
3440 }
3441 break;
3442 }
3443 case BRK:
3444 base::OS::DebugBreak();
3445 break;
3446 default:
3447 UNIMPLEMENTED();
3448 }
3449 }
3450
3451 void Simulator::VisitNEON2RegMisc(Instruction* instr) {
3452 NEONFormatDecoder nfd(instr);
3453 VectorFormat vf = nfd.GetVectorFormat();
3454
3455 // Format mapping for "long pair" instructions, [su]addlp, [su]adalp.
3456 static const NEONFormatMap map_lp = {
3457 {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
3458 VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
3459
3460 static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}};
3461 VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
3462
3463 static const NEONFormatMap map_fcvtn = {{22, 30},
3464 {NF_4H, NF_8H, NF_2S, NF_4S}};
3465 VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
3466
3467 SimVRegister& rd = vreg(instr->Rd());
3468 SimVRegister& rn = vreg(instr->Rn());
3469
3470 if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
3471 // These instructions all use a two bit size field, except NOT and RBIT,
3472 // which use the field to encode the operation.
3473 switch (instr->Mask(NEON2RegMiscMask)) {
3474 case NEON_REV64:
3475 rev64(vf, rd, rn);
3476 break;
3477 case NEON_REV32:
3478 rev32(vf, rd, rn);
3479 break;
3480 case NEON_REV16:
3481 rev16(vf, rd, rn);
3482 break;
3483 case NEON_SUQADD:
3484 suqadd(vf, rd, rn);
3485 break;
3486 case NEON_USQADD:
3487 usqadd(vf, rd, rn);
3488 break;
3489 case NEON_CLS:
3490 cls(vf, rd, rn);
3491 break;
3492 case NEON_CLZ:
3493 clz(vf, rd, rn);
3494 break;
3495 case NEON_CNT:
3496 cnt(vf, rd, rn);
3497 break;
3498 case NEON_SQABS:
3499 abs(vf, rd, rn).SignedSaturate(vf);
3500 break;
3501 case NEON_SQNEG:
3502 neg(vf, rd, rn).SignedSaturate(vf);
3503 break;
3504 case NEON_CMGT_zero:
3505 cmp(vf, rd, rn, 0, gt);
3506 break;
3507 case NEON_CMGE_zero:
3508 cmp(vf, rd, rn, 0, ge);
3509 break;
3510 case NEON_CMEQ_zero:
3511 cmp(vf, rd, rn, 0, eq);
3512 break;
3513 case NEON_CMLE_zero:
3514 cmp(vf, rd, rn, 0, le);
3515 break;
3516 case NEON_CMLT_zero:
3517 cmp(vf, rd, rn, 0, lt);
3518 break;
3519 case NEON_ABS:
3520 abs(vf, rd, rn);
3521 break;
3522 case NEON_NEG:
3523 neg(vf, rd, rn);
3524 break;
3525 case NEON_SADDLP:
3526 saddlp(vf_lp, rd, rn);
3527 break;
3528 case NEON_UADDLP:
3529 uaddlp(vf_lp, rd, rn);
3530 break;
3531 case NEON_SADALP:
3532 sadalp(vf_lp, rd, rn);
3533 break;
3534 case NEON_UADALP:
3535 uadalp(vf_lp, rd, rn);
3536 break;
3537 case NEON_RBIT_NOT:
3538 vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
3539 switch (instr->FPType()) {
3540 case 0:
3541 not_(vf, rd, rn);
3542 break;
3543 case 1:
3544 rbit(vf, rd, rn);
3545 break;
3546 default:
3547 UNIMPLEMENTED();
3548 }
3549 break;
3550 }
3551 } else {
3552 VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
3553 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
3554 bool inexact_exception = false;
3555
3556 // These instructions all use a one bit size field, except XTN, SQXTUN,
3557 // SHLL, SQXTN and UQXTN, which use a two bit size field.
3558 switch (instr->Mask(NEON2RegMiscFPMask)) {
3559 case NEON_FABS:
3560 fabs_(fpf, rd, rn);
3561 return;
3562 case NEON_FNEG:
3563 fneg(fpf, rd, rn);
3564 return;
3565 case NEON_FSQRT:
3566 fsqrt(fpf, rd, rn);
3567 return;
3568 case NEON_FCVTL:
3569 if (instr->Mask(NEON_Q)) {
3570 fcvtl2(vf_fcvtl, rd, rn);
3571 } else {
3572 fcvtl(vf_fcvtl, rd, rn);
3573 }
3574 return;
3575 case NEON_FCVTN:
3576 if (instr->Mask(NEON_Q)) {
3577 fcvtn2(vf_fcvtn, rd, rn);
3578 } else {
3579 fcvtn(vf_fcvtn, rd, rn);
3580 }
3581 return;
3582 case NEON_FCVTXN:
3583 if (instr->Mask(NEON_Q)) {
3584 fcvtxn2(vf_fcvtn, rd, rn);
3585 } else {
3586 fcvtxn(vf_fcvtn, rd, rn);
3587 }
3588 return;
3589
3590 // The following instructions break from the switch statement, rather
3591 // than return.
3592 case NEON_FRINTI:
3593 break; // Use FPCR rounding mode.
3594 case NEON_FRINTX:
3595 inexact_exception = true;
3596 break;
3597 case NEON_FRINTA:
3598 fpcr_rounding = FPTieAway;
3599 break;
3600 case NEON_FRINTM:
3601 fpcr_rounding = FPNegativeInfinity;
3602 break;
3603 case NEON_FRINTN:
3604 fpcr_rounding = FPTieEven;
3605 break;
3606 case NEON_FRINTP:
3607 fpcr_rounding = FPPositiveInfinity;
3608 break;
3609 case NEON_FRINTZ:
3610 fpcr_rounding = FPZero;
3611 break;
3612
3613 // The remaining cases return to the caller.
3614 case NEON_FCVTNS:
3615 fcvts(fpf, rd, rn, FPTieEven);
3616 return;
3617 case NEON_FCVTNU:
3618 fcvtu(fpf, rd, rn, FPTieEven);
3619 return;
3620 case NEON_FCVTPS:
3621 fcvts(fpf, rd, rn, FPPositiveInfinity);
3622 return;
3623 case NEON_FCVTPU:
3624 fcvtu(fpf, rd, rn, FPPositiveInfinity);
3625 return;
3626 case NEON_FCVTMS:
3627 fcvts(fpf, rd, rn, FPNegativeInfinity);
3628 return;
3629 case NEON_FCVTMU:
3630 fcvtu(fpf, rd, rn, FPNegativeInfinity);
3631 return;
3632 case NEON_FCVTZS:
3633 fcvts(fpf, rd, rn, FPZero);
3634 return;
3635 case NEON_FCVTZU:
3636 fcvtu(fpf, rd, rn, FPZero);
3637 return;
3638 case NEON_FCVTAS:
3639 fcvts(fpf, rd, rn, FPTieAway);
3640 return;
3641 case NEON_FCVTAU:
3642 fcvtu(fpf, rd, rn, FPTieAway);
3643 return;
3644 case NEON_SCVTF:
3645 scvtf(fpf, rd, rn, 0, fpcr_rounding);
3646 return;
3647 case NEON_UCVTF:
3648 ucvtf(fpf, rd, rn, 0, fpcr_rounding);
3649 return;
3650 case NEON_URSQRTE:
3651 ursqrte(fpf, rd, rn);
3652 return;
3653 case NEON_URECPE:
3654 urecpe(fpf, rd, rn);
3655 return;
3656 case NEON_FRSQRTE:
3657 frsqrte(fpf, rd, rn);
3658 return;
3659 case NEON_FRECPE:
3660 frecpe(fpf, rd, rn, fpcr_rounding);
3661 return;
3662 case NEON_FCMGT_zero:
3663 fcmp_zero(fpf, rd, rn, gt);
3664 return;
3665 case NEON_FCMGE_zero:
3666 fcmp_zero(fpf, rd, rn, ge);
3667 return;
3668 case NEON_FCMEQ_zero:
3669 fcmp_zero(fpf, rd, rn, eq);
3670 return;
3671 case NEON_FCMLE_zero:
3672 fcmp_zero(fpf, rd, rn, le);
3673 return;
3674 case NEON_FCMLT_zero:
3675 fcmp_zero(fpf, rd, rn, lt);
3676 return;
3677 default:
3678 if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
3679 (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
3680 switch (instr->Mask(NEON2RegMiscMask)) {
3681 case NEON_XTN:
3682 xtn(vf, rd, rn);
3683 return;
3684 case NEON_SQXTN:
3685 sqxtn(vf, rd, rn);
3686 return;
3687 case NEON_UQXTN:
3688 uqxtn(vf, rd, rn);
3689 return;
3690 case NEON_SQXTUN:
3691 sqxtun(vf, rd, rn);
3692 return;
3693 case NEON_SHLL:
3694 vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
3695 if (instr->Mask(NEON_Q)) {
3696 shll2(vf, rd, rn);
3697 } else {
3698 shll(vf, rd, rn);
3699 }
3700 return;
3701 default:
3702 UNIMPLEMENTED();
3703 }
3704 } else {
3705 UNIMPLEMENTED();
3706 }
3707 }
3708
3709 // Only FRINT* instructions fall through the switch above.
3710 frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
3711 }
3712 }
3713
3714 void Simulator::VisitNEON3Same(Instruction* instr) {
3715 NEONFormatDecoder nfd(instr);
3716 SimVRegister& rd = vreg(instr->Rd());
3717 SimVRegister& rn = vreg(instr->Rn());
3718 SimVRegister& rm = vreg(instr->Rm());
3719
3720 if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
3721 VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
3722 switch (instr->Mask(NEON3SameLogicalMask)) {
3723 case NEON_AND:
3724 and_(vf, rd, rn, rm);
3725 break;
3726 case NEON_ORR:
3727 orr(vf, rd, rn, rm);
3728 break;
3729 case NEON_ORN:
3730 orn(vf, rd, rn, rm);
3731 break;
3732 case NEON_EOR:
3733 eor(vf, rd, rn, rm);
3734 break;
3735 case NEON_BIC:
3736 bic(vf, rd, rn, rm);
3737 break;
3738 case NEON_BIF:
3739 bif(vf, rd, rn, rm);
3740 break;
3741 case NEON_BIT:
3742 bit(vf, rd, rn, rm);
3743 break;
3744 case NEON_BSL:
3745 bsl(vf, rd, rn, rm);
3746 break;
3747 default:
3748 UNIMPLEMENTED();
3749 }
3750 } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
3751 VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
3752 switch (instr->Mask(NEON3SameFPMask)) {
3753 case NEON_FADD:
3754 fadd(vf, rd, rn, rm);
3755 break;
3756 case NEON_FSUB:
3757 fsub(vf, rd, rn, rm);
3758 break;
3759 case NEON_FMUL:
3760 fmul(vf, rd, rn, rm);
3761 break;
3762 case NEON_FDIV:
3763 fdiv(vf, rd, rn, rm);
3764 break;
3765 case NEON_FMAX:
3766 fmax(vf, rd, rn, rm);
3767 break;
3768 case NEON_FMIN:
3769 fmin(vf, rd, rn, rm);
3770 break;
3771 case NEON_FMAXNM:
3772 fmaxnm(vf, rd, rn, rm);
3773 break;
3774 case NEON_FMINNM:
3775 fminnm(vf, rd, rn, rm);
3776 break;
3777 case NEON_FMLA:
3778 fmla(vf, rd, rn, rm);
3779 break;
3780 case NEON_FMLS:
3781 fmls(vf, rd, rn, rm);
3782 break;
3783 case NEON_FMULX:
3784 fmulx(vf, rd, rn, rm);
3785 break;
3786 case NEON_FACGE:
3787 fabscmp(vf, rd, rn, rm, ge);
3788 break;
3789 case NEON_FACGT:
3790 fabscmp(vf, rd, rn, rm, gt);
3791 break;
3792 case NEON_FCMEQ:
3793 fcmp(vf, rd, rn, rm, eq);
3794 break;
3795 case NEON_FCMGE:
3796 fcmp(vf, rd, rn, rm, ge);
3797 break;
3798 case NEON_FCMGT:
3799 fcmp(vf, rd, rn, rm, gt);
3800 break;
3801 case NEON_FRECPS:
3802 frecps(vf, rd, rn, rm);
3803 break;
3804 case NEON_FRSQRTS:
3805 frsqrts(vf, rd, rn, rm);
3806 break;
3807 case NEON_FABD:
3808 fabd(vf, rd, rn, rm);
3809 break;
3810 case NEON_FADDP:
3811 faddp(vf, rd, rn, rm);
3812 break;
3813 case NEON_FMAXP:
3814 fmaxp(vf, rd, rn, rm);
3815 break;
3816 case NEON_FMAXNMP:
3817 fmaxnmp(vf, rd, rn, rm);
3818 break;
3819 case NEON_FMINP:
3820 fminp(vf, rd, rn, rm);
3821 break;
3822 case NEON_FMINNMP:
3823 fminnmp(vf, rd, rn, rm);
3824 break;
3825 default:
3826 UNIMPLEMENTED();
3827 }
3828 } else {
3829 VectorFormat vf = nfd.GetVectorFormat();
3830 switch (instr->Mask(NEON3SameMask)) {
3831 case NEON_ADD:
3832 add(vf, rd, rn, rm);
3833 break;
3834 case NEON_ADDP:
3835 addp(vf, rd, rn, rm);
3836 break;
3837 case NEON_CMEQ:
3838 cmp(vf, rd, rn, rm, eq);
3839 break;
3840 case NEON_CMGE:
3841 cmp(vf, rd, rn, rm, ge);
3842 break;
3843 case NEON_CMGT:
3844 cmp(vf, rd, rn, rm, gt);
3845 break;
3846 case NEON_CMHI:
3847 cmp(vf, rd, rn, rm, hi);
3848 break;
3849 case NEON_CMHS:
3850 cmp(vf, rd, rn, rm, hs);
3851 break;
3852 case NEON_CMTST:
3853 cmptst(vf, rd, rn, rm);
3854 break;
3855 case NEON_MLS:
3856 mls(vf, rd, rn, rm);
3857 break;
3858 case NEON_MLA:
3859 mla(vf, rd, rn, rm);
3860 break;
3861 case NEON_MUL:
3862 mul(vf, rd, rn, rm);
3863 break;
3864 case NEON_PMUL:
3865 pmul(vf, rd, rn, rm);
3866 break;
3867 case NEON_SMAX:
3868 smax(vf, rd, rn, rm);
3869 break;
3870 case NEON_SMAXP:
3871 smaxp(vf, rd, rn, rm);
3872 break;
3873 case NEON_SMIN:
3874 smin(vf, rd, rn, rm);
3875 break;
3876 case NEON_SMINP:
3877 sminp(vf, rd, rn, rm);
3878 break;
3879 case NEON_SUB:
3880 sub(vf, rd, rn, rm);
3881 break;
3882 case NEON_UMAX:
3883 umax(vf, rd, rn, rm);
3884 break;
3885 case NEON_UMAXP:
3886 umaxp(vf, rd, rn, rm);
3887 break;
3888 case NEON_UMIN:
3889 umin(vf, rd, rn, rm);
3890 break;
3891 case NEON_UMINP:
3892 uminp(vf, rd, rn, rm);
3893 break;
3894 case NEON_SSHL:
3895 sshl(vf, rd, rn, rm);
3896 break;
3897 case NEON_USHL:
3898 ushl(vf, rd, rn, rm);
3899 break;
3900 case NEON_SABD:
3901 AbsDiff(vf, rd, rn, rm, true);
3902 break;
3903 case NEON_UABD:
3904 AbsDiff(vf, rd, rn, rm, false);
3905 break;
3906 case NEON_SABA:
3907 saba(vf, rd, rn, rm);
3908 break;
3909 case NEON_UABA:
3910 uaba(vf, rd, rn, rm);
3911 break;
3912 case NEON_UQADD:
3913 add(vf, rd, rn, rm).UnsignedSaturate(vf);
3914 break;
3915 case NEON_SQADD:
3916 add(vf, rd, rn, rm).SignedSaturate(vf);
3917 break;
3918 case NEON_UQSUB:
3919 sub(vf, rd, rn, rm).UnsignedSaturate(vf);
3920 break;
3921 case NEON_SQSUB:
3922 sub(vf, rd, rn, rm).SignedSaturate(vf);
3923 break;
3924 case NEON_SQDMULH:
3925 sqdmulh(vf, rd, rn, rm);
3926 break;
3927 case NEON_SQRDMULH:
3928 sqrdmulh(vf, rd, rn, rm);
3929 break;
3930 case NEON_UQSHL:
3931 ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
3932 break;
3933 case NEON_SQSHL:
3934 sshl(vf, rd, rn, rm).SignedSaturate(vf);
3935 break;
3936 case NEON_URSHL:
3937 ushl(vf, rd, rn, rm).Round(vf);
3938 break;
3939 case NEON_SRSHL:
3940 sshl(vf, rd, rn, rm).Round(vf);
3941 break;
3942 case NEON_UQRSHL:
3943 ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
3944 break;
3945 case NEON_SQRSHL:
3946 sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
3947 break;
3948 case NEON_UHADD:
3949 add(vf, rd, rn, rm).Uhalve(vf);
3950 break;
3951 case NEON_URHADD:
3952 add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
3953 break;
3954 case NEON_SHADD:
3955 add(vf, rd, rn, rm).Halve(vf);
3956 break;
3957 case NEON_SRHADD:
3958 add(vf, rd, rn, rm).Halve(vf).Round(vf);
3959 break;
3960 case NEON_UHSUB:
3961 sub(vf, rd, rn, rm).Uhalve(vf);
3962 break;
3963 case NEON_SHSUB:
3964 sub(vf, rd, rn, rm).Halve(vf);
3965 break;
3966 default:
3967 UNIMPLEMENTED();
3968 }
3969 }
3970 }
3971
3972 void Simulator::VisitNEON3Different(Instruction* instr) {
3973 NEONFormatDecoder nfd(instr);
3974 VectorFormat vf = nfd.GetVectorFormat();
3975 VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
3976
3977 SimVRegister& rd = vreg(instr->Rd());
3978 SimVRegister& rn = vreg(instr->Rn());
3979 SimVRegister& rm = vreg(instr->Rm());
3980
3981 switch (instr->Mask(NEON3DifferentMask)) {
3982 case NEON_PMULL:
3983 pmull(vf_l, rd, rn, rm);
3984 break;
3985 case NEON_PMULL2:
3986 pmull2(vf_l, rd, rn, rm);
3987 break;
3988 case NEON_UADDL:
3989 uaddl(vf_l, rd, rn, rm);
3990 break;
3991 case NEON_UADDL2:
3992 uaddl2(vf_l, rd, rn, rm);
3993 break;
3994 case NEON_SADDL:
3995 saddl(vf_l, rd, rn, rm);
3996 break;
3997 case NEON_SADDL2:
3998 saddl2(vf_l, rd, rn, rm);
3999 break;
4000 case NEON_USUBL:
4001 usubl(vf_l, rd, rn, rm);
4002 break;
4003 case NEON_USUBL2:
4004 usubl2(vf_l, rd, rn, rm);
4005 break;
4006 case NEON_SSUBL:
4007 ssubl(vf_l, rd, rn, rm);
4008 break;
4009 case NEON_SSUBL2:
4010 ssubl2(vf_l, rd, rn, rm);
4011 break;
4012 case NEON_SABAL:
4013 sabal(vf_l, rd, rn, rm);
4014 break;
4015 case NEON_SABAL2:
4016 sabal2(vf_l, rd, rn, rm);
4017 break;
4018 case NEON_UABAL:
4019 uabal(vf_l, rd, rn, rm);
4020 break;
4021 case NEON_UABAL2:
4022 uabal2(vf_l, rd, rn, rm);
4023 break;
4024 case NEON_SABDL:
4025 sabdl(vf_l, rd, rn, rm);
4026 break;
4027 case NEON_SABDL2:
4028 sabdl2(vf_l, rd, rn, rm);
4029 break;
4030 case NEON_UABDL:
4031 uabdl(vf_l, rd, rn, rm);
4032 break;
4033 case NEON_UABDL2:
4034 uabdl2(vf_l, rd, rn, rm);
4035 break;
4036 case NEON_SMLAL:
4037 smlal(vf_l, rd, rn, rm);
4038 break;
4039 case NEON_SMLAL2:
4040 smlal2(vf_l, rd, rn, rm);
4041 break;
4042 case NEON_UMLAL:
4043 umlal(vf_l, rd, rn, rm);
4044 break;
4045 case NEON_UMLAL2:
4046 umlal2(vf_l, rd, rn, rm);
4047 break;
4048 case NEON_SMLSL:
4049 smlsl(vf_l, rd, rn, rm);
4050 break;
4051 case NEON_SMLSL2:
4052 smlsl2(vf_l, rd, rn, rm);
4053 break;
4054 case NEON_UMLSL:
4055 umlsl(vf_l, rd, rn, rm);
4056 break;
4057 case NEON_UMLSL2:
4058 umlsl2(vf_l, rd, rn, rm);
4059 break;
4060 case NEON_SMULL:
4061 smull(vf_l, rd, rn, rm);
4062 break;
4063 case NEON_SMULL2:
4064 smull2(vf_l, rd, rn, rm);
4065 break;
4066 case NEON_UMULL:
4067 umull(vf_l, rd, rn, rm);
4068 break;
4069 case NEON_UMULL2:
4070 umull2(vf_l, rd, rn, rm);
4071 break;
4072 case NEON_SQDMLAL:
4073 sqdmlal(vf_l, rd, rn, rm);
4074 break;
4075 case NEON_SQDMLAL2:
4076 sqdmlal2(vf_l, rd, rn, rm);
4077 break;
4078 case NEON_SQDMLSL:
4079 sqdmlsl(vf_l, rd, rn, rm);
4080 break;
4081 case NEON_SQDMLSL2:
4082 sqdmlsl2(vf_l, rd, rn, rm);
4083 break;
4084 case NEON_SQDMULL:
4085 sqdmull(vf_l, rd, rn, rm);
4086 break;
4087 case NEON_SQDMULL2:
4088 sqdmull2(vf_l, rd, rn, rm);
4089 break;
4090 case NEON_UADDW:
4091 uaddw(vf_l, rd, rn, rm);
4092 break;
4093 case NEON_UADDW2:
4094 uaddw2(vf_l, rd, rn, rm);
4095 break;
4096 case NEON_SADDW:
4097 saddw(vf_l, rd, rn, rm);
4098 break;
4099 case NEON_SADDW2:
4100 saddw2(vf_l, rd, rn, rm);
4101 break;
4102 case NEON_USUBW:
4103 usubw(vf_l, rd, rn, rm);
4104 break;
4105 case NEON_USUBW2:
4106 usubw2(vf_l, rd, rn, rm);
4107 break;
4108 case NEON_SSUBW:
4109 ssubw(vf_l, rd, rn, rm);
4110 break;
4111 case NEON_SSUBW2:
4112 ssubw2(vf_l, rd, rn, rm);
4113 break;
4114 case NEON_ADDHN:
4115 addhn(vf, rd, rn, rm);
4116 break;
4117 case NEON_ADDHN2:
4118 addhn2(vf, rd, rn, rm);
4119 break;
4120 case NEON_RADDHN:
4121 raddhn(vf, rd, rn, rm);
4122 break;
4123 case NEON_RADDHN2:
4124 raddhn2(vf, rd, rn, rm);
4125 break;
4126 case NEON_SUBHN:
4127 subhn(vf, rd, rn, rm);
4128 break;
4129 case NEON_SUBHN2:
4130 subhn2(vf, rd, rn, rm);
4131 break;
4132 case NEON_RSUBHN:
4133 rsubhn(vf, rd, rn, rm);
4134 break;
4135 case NEON_RSUBHN2:
4136 rsubhn2(vf, rd, rn, rm);
4137 break;
4138 default:
4139 UNIMPLEMENTED();
4140 }
4141 }
4142
4143 void Simulator::VisitNEONAcrossLanes(Instruction* instr) {
4144 NEONFormatDecoder nfd(instr);
4145
4146 SimVRegister& rd = vreg(instr->Rd());
4147 SimVRegister& rn = vreg(instr->Rn());
4148
4149 // The input operand's VectorFormat is passed for these instructions.
4150 if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
4151 VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
4152
4153 switch (instr->Mask(NEONAcrossLanesFPMask)) {
4154 case NEON_FMAXV:
4155 fmaxv(vf, rd, rn);
4156 break;
4157 case NEON_FMINV:
4158 fminv(vf, rd, rn);
4159 break;
4160 case NEON_FMAXNMV:
4161 fmaxnmv(vf, rd, rn);
4162 break;
4163 case NEON_FMINNMV:
4164 fminnmv(vf, rd, rn);
4165 break;
4166 default:
4167 UNIMPLEMENTED();
4168 }
4169 } else {
4170 VectorFormat vf = nfd.GetVectorFormat();
4171
4172 switch (instr->Mask(NEONAcrossLanesMask)) {
4173 case NEON_ADDV:
4174 addv(vf, rd, rn);
4175 break;
4176 case NEON_SMAXV:
4177 smaxv(vf, rd, rn);
4178 break;
4179 case NEON_SMINV:
4180 sminv(vf, rd, rn);
4181 break;
4182 case NEON_UMAXV:
4183 umaxv(vf, rd, rn);
4184 break;
4185 case NEON_UMINV:
4186 uminv(vf, rd, rn);
4187 break;
4188 case NEON_SADDLV:
4189 saddlv(vf, rd, rn);
4190 break;
4191 case NEON_UADDLV:
4192 uaddlv(vf, rd, rn);
4193 break;
4194 default:
4195 UNIMPLEMENTED();
4196 }
4197 }
4198 }
4199
4200 void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
4201 NEONFormatDecoder nfd(instr);
4202 VectorFormat vf_r = nfd.GetVectorFormat();
4203 VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
4204
4205 SimVRegister& rd = vreg(instr->Rd());
4206 SimVRegister& rn = vreg(instr->Rn());
4207
4208 ByElementOp Op = nullptr;
4209
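  // The element index is encoded in H:L (plus M for half-word elements, whose
  // Rm field is restricted to registers 0-15). The FP by-element forms handled
  // in the default case below recompute the index from the FP type.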
4210 int rm_reg = instr->Rm();
4211 int index = (instr->NEONH() << 1) | instr->NEONL();
4212 if (instr->NEONSize() == 1) {
4213 rm_reg &= 0xF;
4214 index = (index << 1) | instr->NEONM();
4215 }
4216
4217 switch (instr->Mask(NEONByIndexedElementMask)) {
4218 case NEON_MUL_byelement:
4219 Op = &Simulator::mul;
4220 vf = vf_r;
4221 break;
4222 case NEON_MLA_byelement:
4223 Op = &Simulator::mla;
4224 vf = vf_r;
4225 break;
4226 case NEON_MLS_byelement:
4227 Op = &Simulator::mls;
4228 vf = vf_r;
4229 break;
4230 case NEON_SQDMULH_byelement:
4231 Op = &Simulator::sqdmulh;
4232 vf = vf_r;
4233 break;
4234 case NEON_SQRDMULH_byelement:
4235 Op = &Simulator::sqrdmulh;
4236 vf = vf_r;
4237 break;
4238 case NEON_SMULL_byelement:
4239 if (instr->Mask(NEON_Q)) {
4240 Op = &Simulator::smull2;
4241 } else {
4242 Op = &Simulator::smull;
4243 }
4244 break;
4245 case NEON_UMULL_byelement:
4246 if (instr->Mask(NEON_Q)) {
4247 Op = &Simulator::umull2;
4248 } else {
4249 Op = &Simulator::umull;
4250 }
4251 break;
4252 case NEON_SMLAL_byelement:
4253 if (instr->Mask(NEON_Q)) {
4254 Op = &Simulator::smlal2;
4255 } else {
4256 Op = &Simulator::smlal;
4257 }
4258 break;
4259 case NEON_UMLAL_byelement:
4260 if (instr->Mask(NEON_Q)) {
4261 Op = &Simulator::umlal2;
4262 } else {
4263 Op = &Simulator::umlal;
4264 }
4265 break;
4266 case NEON_SMLSL_byelement:
4267 if (instr->Mask(NEON_Q)) {
4268 Op = &Simulator::smlsl2;
4269 } else {
4270 Op = &Simulator::smlsl;
4271 }
4272 break;
4273 case NEON_UMLSL_byelement:
4274 if (instr->Mask(NEON_Q)) {
4275 Op = &Simulator::umlsl2;
4276 } else {
4277 Op = &Simulator::umlsl;
4278 }
4279 break;
4280 case NEON_SQDMULL_byelement:
4281 if (instr->Mask(NEON_Q)) {
4282 Op = &Simulator::sqdmull2;
4283 } else {
4284 Op = &Simulator::sqdmull;
4285 }
4286 break;
4287 case NEON_SQDMLAL_byelement:
4288 if (instr->Mask(NEON_Q)) {
4289 Op = &Simulator::sqdmlal2;
4290 } else {
4291 Op = &Simulator::sqdmlal;
4292 }
4293 break;
4294 case NEON_SQDMLSL_byelement:
4295 if (instr->Mask(NEON_Q)) {
4296 Op = &Simulator::sqdmlsl2;
4297 } else {
4298 Op = &Simulator::sqdmlsl;
4299 }
4300 break;
4301 default:
4302 index = instr->NEONH();
4303 if ((instr->FPType() & 1) == 0) {
4304 index = (index << 1) | instr->NEONL();
4305 }
4306
4307 vf = nfd.GetVectorFormat(nfd.FPFormatMap());
4308
4309 switch (instr->Mask(NEONByIndexedElementFPMask)) {
4310 case NEON_FMUL_byelement:
4311 Op = &Simulator::fmul;
4312 break;
4313 case NEON_FMLA_byelement:
4314 Op = &Simulator::fmla;
4315 break;
4316 case NEON_FMLS_byelement:
4317 Op = &Simulator::fmls;
4318 break;
4319 case NEON_FMULX_byelement:
4320 Op = &Simulator::fmulx;
4321 break;
4322 default:
4323 UNIMPLEMENTED();
4324 }
4325 }
4326
4327 (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
4328 }
4329
4330 void Simulator::VisitNEONCopy(Instruction* instr) {
4331 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
4332 VectorFormat vf = nfd.GetVectorFormat();
4333
4334 SimVRegister& rd = vreg(instr->Rd());
4335 SimVRegister& rn = vreg(instr->Rn());
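  // imm5 encodes both the element size (the position of its lowest set bit)
  // and the element index (the bits above that).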
4336 int imm5 = instr->ImmNEON5();
4337 int lsb = LowestSetBitPosition(imm5);
4338 int reg_index = imm5 >> lsb;
4339
4340 if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
4341 int imm4 = instr->ImmNEON4();
4342 DCHECK_GE(lsb, 1);
4343 int rn_index = imm4 >> (lsb - 1);
4344 ins_element(vf, rd, reg_index, rn, rn_index);
4345 } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
4346 ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
4347 } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
4348 uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
4349 value &= MaxUintFromFormat(vf);
4350 set_xreg(instr->Rd(), value);
4351 } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
4352 int64_t value = LogicVRegister(rn).Int(vf, reg_index);
4353 if (instr->NEONQ()) {
4354 set_xreg(instr->Rd(), value);
4355 } else {
4356 DCHECK(is_int32(value));
4357 set_wreg(instr->Rd(), static_cast<int32_t>(value));
4358 }
4359 } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
4360 dup_element(vf, rd, rn, reg_index);
4361 } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
4362 dup_immediate(vf, rd, xreg(instr->Rn()));
4363 } else {
4364 UNIMPLEMENTED();
4365 }
4366 }
4367
4368 void Simulator::VisitNEONExtract(Instruction* instr) {
4369 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
4370 VectorFormat vf = nfd.GetVectorFormat();
4371 SimVRegister& rd = vreg(instr->Rd());
4372 SimVRegister& rn = vreg(instr->Rn());
4373 SimVRegister& rm = vreg(instr->Rm());
4374 if (instr->Mask(NEONExtractMask) == NEON_EXT) {
4375 int index = instr->ImmNEONExt();
4376 ext(vf, rd, rn, rm, index);
4377 } else {
4378 UNIMPLEMENTED();
4379 }
4380 }
4381
4382 void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
4383 AddrMode addr_mode) {
4384 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
4385 VectorFormat vf = nfd.GetVectorFormat();
4386
4387 uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
4388 int reg_size = RegisterSizeInBytesFromFormat(vf);
4389
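  // Compute the (up to four) transfer registers, wrapping from v31 back to v0,
  // and the address each one maps to.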
4390 int reg[4];
4391 uint64_t addr[4];
4392 for (int i = 0; i < 4; i++) {
4393 reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
4394 addr[i] = addr_base + (i * reg_size);
4395 }
4396 int count = 1;
4397 bool log_read = true;
4398
4399 // Bit 23 determines whether this is an offset or post-index addressing mode.
4400 // In offset mode, bits 20 to 16 should be zero; these bits encode the
4401   // register or immediate in post-index mode.
4402 if ((instr->Bit(23) == 0) && (instr->Bits(20, 16) != 0)) {
4403 UNREACHABLE();
4404 }
4405
4406 // We use the PostIndex mask here, as it works in this case for both Offset
4407 // and PostIndex addressing.
4408 switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
4409 case NEON_LD1_4v:
4410 case NEON_LD1_4v_post:
4411 ld1(vf, vreg(reg[3]), addr[3]);
4412 count++;
4413 V8_FALLTHROUGH;
4414 case NEON_LD1_3v:
4415 case NEON_LD1_3v_post:
4416 ld1(vf, vreg(reg[2]), addr[2]);
4417 count++;
4418 V8_FALLTHROUGH;
4419 case NEON_LD1_2v:
4420 case NEON_LD1_2v_post:
4421 ld1(vf, vreg(reg[1]), addr[1]);
4422 count++;
4423 V8_FALLTHROUGH;
4424 case NEON_LD1_1v:
4425 case NEON_LD1_1v_post:
4426 ld1(vf, vreg(reg[0]), addr[0]);
4427 break;
4428 case NEON_ST1_4v:
4429 case NEON_ST1_4v_post:
4430 st1(vf, vreg(reg[3]), addr[3]);
4431 count++;
4432 V8_FALLTHROUGH;
4433 case NEON_ST1_3v:
4434 case NEON_ST1_3v_post:
4435 st1(vf, vreg(reg[2]), addr[2]);
4436 count++;
4437 V8_FALLTHROUGH;
4438 case NEON_ST1_2v:
4439 case NEON_ST1_2v_post:
4440 st1(vf, vreg(reg[1]), addr[1]);
4441 count++;
4442 V8_FALLTHROUGH;
4443 case NEON_ST1_1v:
4444 case NEON_ST1_1v_post:
4445 st1(vf, vreg(reg[0]), addr[0]);
4446 log_read = false;
4447 break;
4448 case NEON_LD2_post:
4449 case NEON_LD2:
4450 ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
4451 count = 2;
4452 break;
4453 case NEON_ST2:
4454 case NEON_ST2_post:
4455 st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
4456 count = 2;
4457 log_read = false;
4458 break;
4459 case NEON_LD3_post:
4460 case NEON_LD3:
4461 ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
4462 count = 3;
4463 break;
4464 case NEON_ST3:
4465 case NEON_ST3_post:
4466 st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
4467 count = 3;
4468 log_read = false;
4469 break;
4470 case NEON_LD4_post:
4471 case NEON_LD4:
4472 ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
4473 count = 4;
4474 break;
4475 case NEON_ST4:
4476 case NEON_ST4_post:
4477 st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
4478 count = 4;
4479 log_read = false;
4480 break;
4481 default:
4482 UNIMPLEMENTED();
4483 }
4484
4485 {
4486 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
4487 if (log_read) {
4488 local_monitor_.NotifyLoad();
4489 } else {
4490 local_monitor_.NotifyStore();
4491 global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
4492 }
4493 }
4494
4495 // Explicitly log the register update whilst we have type information.
4496 for (int i = 0; i < count; i++) {
4497 // For de-interleaving loads, only print the base address.
4498 int lane_size = LaneSizeInBytesFromFormat(vf);
4499 PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
4500 GetPrintRegisterFormatForSize(reg_size, lane_size));
4501 if (log_read) {
4502 LogVRead(addr_base, reg[i], format);
4503 } else {
4504 LogVWrite(addr_base, reg[i], format);
4505 }
4506 }
4507
4508 if (addr_mode == PostIndex) {
4509 int rm = instr->Rm();
4510 // The immediate post index addressing mode is indicated by rm = 31.
4511 // The immediate is implied by the number of vector registers used.
4512 addr_base +=
4513 (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count : xreg(rm);
4514 set_xreg(instr->Rn(), addr_base);
4515 } else {
4516 DCHECK_EQ(addr_mode, Offset);
4517 }
4518 }
4519
4520 void Simulator::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
4521 NEONLoadStoreMultiStructHelper(instr, Offset);
4522 }
4523
4524 void Simulator::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
4525 NEONLoadStoreMultiStructHelper(instr, PostIndex);
4526 }
4527
4528 void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
4529 AddrMode addr_mode) {
4530 uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
4531 int rt = instr->Rt();
4532
4533 // Bit 23 determines whether this is an offset or post-index addressing mode.
4534 // In offset mode, bits 20 to 16 should be zero; these bits encode the
4535   // register or immediate in post-index mode.
4536 DCHECK_IMPLIES(instr->Bit(23) == 0, instr->Bits(20, 16) == 0);
4537
4538 bool do_load = false;
4539
4540 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
4541 VectorFormat vf_t = nfd.GetVectorFormat();
4542
4543 VectorFormat vf = kFormat16B;
4544 // We use the PostIndex mask here, as it works in this case for both Offset
4545 // and PostIndex addressing.
4546 switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
4547 case NEON_LD1_b:
4548 case NEON_LD1_b_post:
4549 case NEON_LD2_b:
4550 case NEON_LD2_b_post:
4551 case NEON_LD3_b:
4552 case NEON_LD3_b_post:
4553 case NEON_LD4_b:
4554 case NEON_LD4_b_post:
4555 do_load = true;
4556 V8_FALLTHROUGH;
4557 case NEON_ST1_b:
4558 case NEON_ST1_b_post:
4559 case NEON_ST2_b:
4560 case NEON_ST2_b_post:
4561 case NEON_ST3_b:
4562 case NEON_ST3_b_post:
4563 case NEON_ST4_b:
4564 case NEON_ST4_b_post:
4565 break;
4566
4567 case NEON_LD1_h:
4568 case NEON_LD1_h_post:
4569 case NEON_LD2_h:
4570 case NEON_LD2_h_post:
4571 case NEON_LD3_h:
4572 case NEON_LD3_h_post:
4573 case NEON_LD4_h:
4574 case NEON_LD4_h_post:
4575 do_load = true;
4576 V8_FALLTHROUGH;
4577 case NEON_ST1_h:
4578 case NEON_ST1_h_post:
4579 case NEON_ST2_h:
4580 case NEON_ST2_h_post:
4581 case NEON_ST3_h:
4582 case NEON_ST3_h_post:
4583 case NEON_ST4_h:
4584 case NEON_ST4_h_post:
4585 vf = kFormat8H;
4586 break;
4587
4588 case NEON_LD1_s:
4589 case NEON_LD1_s_post:
4590 case NEON_LD2_s:
4591 case NEON_LD2_s_post:
4592 case NEON_LD3_s:
4593 case NEON_LD3_s_post:
4594 case NEON_LD4_s:
4595 case NEON_LD4_s_post:
4596 do_load = true;
4597 V8_FALLTHROUGH;
4598 case NEON_ST1_s:
4599 case NEON_ST1_s_post:
4600 case NEON_ST2_s:
4601 case NEON_ST2_s_post:
4602 case NEON_ST3_s:
4603 case NEON_ST3_s_post:
4604 case NEON_ST4_s:
4605 case NEON_ST4_s_post: {
4606 static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
4607 "LSB of size distinguishes S and D registers.");
4608 static_assert(
4609 (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post,
4610 "LSB of size distinguishes S and D registers.");
4611 static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
4612 "LSB of size distinguishes S and D registers.");
4613 static_assert(
4614 (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post,
4615 "LSB of size distinguishes S and D registers.");
4616 vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
4617 break;
4618 }
4619
4620 case NEON_LD1R:
4621 case NEON_LD1R_post: {
4622 vf = vf_t;
4623 ld1r(vf, vreg(rt), addr);
4624 do_load = true;
4625 break;
4626 }
4627
4628 case NEON_LD2R:
4629 case NEON_LD2R_post: {
4630 vf = vf_t;
4631 int rt2 = (rt + 1) % kNumberOfVRegisters;
4632 ld2r(vf, vreg(rt), vreg(rt2), addr);
4633 do_load = true;
4634 break;
4635 }
4636
4637 case NEON_LD3R:
4638 case NEON_LD3R_post: {
4639 vf = vf_t;
4640 int rt2 = (rt + 1) % kNumberOfVRegisters;
4641 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
4642 ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
4643 do_load = true;
4644 break;
4645 }
4646
4647 case NEON_LD4R:
4648 case NEON_LD4R_post: {
4649 vf = vf_t;
4650 int rt2 = (rt + 1) % kNumberOfVRegisters;
4651 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
4652 int rt4 = (rt3 + 1) % kNumberOfVRegisters;
4653 ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
4654 do_load = true;
4655 break;
4656 }
4657 default:
4658 UNIMPLEMENTED();
4659 }
4660
4661 PrintRegisterFormat print_format =
4662 GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
4663 // Make sure that the print_format only includes a single lane.
4664 print_format =
4665 static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
4666
4667 int esize = LaneSizeInBytesFromFormat(vf);
4668 int index_shift = LaneSizeInBytesLog2FromFormat(vf);
4669 int lane = instr->NEONLSIndex(index_shift);
4670 int scale = 0;
4671 int rt2 = (rt + 1) % kNumberOfVRegisters;
4672 int rt3 = (rt2 + 1) % kNumberOfVRegisters;
4673 int rt4 = (rt3 + 1) % kNumberOfVRegisters;
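  // 'scale' is the number of registers transferred by the instruction;
  // successive registers access lanes esize bytes apart in memory.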
4674 switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
4675 case NEONLoadStoreSingle1:
4676 scale = 1;
4677 if (do_load) {
4678 ld1(vf, vreg(rt), lane, addr);
4679 LogVRead(addr, rt, print_format, lane);
4680 } else {
4681 st1(vf, vreg(rt), lane, addr);
4682 LogVWrite(addr, rt, print_format, lane);
4683 }
4684 break;
4685 case NEONLoadStoreSingle2:
4686 scale = 2;
4687 if (do_load) {
4688 ld2(vf, vreg(rt), vreg(rt2), lane, addr);
4689 LogVRead(addr, rt, print_format, lane);
4690 LogVRead(addr + esize, rt2, print_format, lane);
4691 } else {
4692 st2(vf, vreg(rt), vreg(rt2), lane, addr);
4693 LogVWrite(addr, rt, print_format, lane);
4694 LogVWrite(addr + esize, rt2, print_format, lane);
4695 }
4696 break;
4697 case NEONLoadStoreSingle3:
4698 scale = 3;
4699 if (do_load) {
4700 ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
4701 LogVRead(addr, rt, print_format, lane);
4702 LogVRead(addr + esize, rt2, print_format, lane);
4703 LogVRead(addr + (2 * esize), rt3, print_format, lane);
4704 } else {
4705 st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
4706 LogVWrite(addr, rt, print_format, lane);
4707 LogVWrite(addr + esize, rt2, print_format, lane);
4708 LogVWrite(addr + (2 * esize), rt3, print_format, lane);
4709 }
4710 break;
4711 case NEONLoadStoreSingle4:
4712 scale = 4;
4713 if (do_load) {
4714 ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
4715 LogVRead(addr, rt, print_format, lane);
4716 LogVRead(addr + esize, rt2, print_format, lane);
4717 LogVRead(addr + (2 * esize), rt3, print_format, lane);
4718 LogVRead(addr + (3 * esize), rt4, print_format, lane);
4719 } else {
4720 st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
4721 LogVWrite(addr, rt, print_format, lane);
4722 LogVWrite(addr + esize, rt2, print_format, lane);
4723 LogVWrite(addr + (2 * esize), rt3, print_format, lane);
4724 LogVWrite(addr + (3 * esize), rt4, print_format, lane);
4725 }
4726 break;
4727 default:
4728 UNIMPLEMENTED();
4729 }
4730
4731 {
4732 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
4733 if (do_load) {
4734 local_monitor_.NotifyLoad();
4735 } else {
4736 local_monitor_.NotifyStore();
4737 global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
4738 }
4739 }
4740
4741 if (addr_mode == PostIndex) {
4742 int rm = instr->Rm();
4743 int lane_size = LaneSizeInBytesFromFormat(vf);
4744 set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
4745 }
4746 }
4747
4748 void Simulator::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
4749 NEONLoadStoreSingleStructHelper(instr, Offset);
4750 }
4751
4752 void Simulator::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
4753 NEONLoadStoreSingleStructHelper(instr, PostIndex);
4754 }
4755
4756 void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
4757 SimVRegister& rd = vreg(instr->Rd());
4758 int cmode = instr->NEONCmode();
4759 int cmode_3_1 = (cmode >> 1) & 7;
4760 int cmode_3 = (cmode >> 3) & 1;
4761 int cmode_2 = (cmode >> 2) & 1;
4762 int cmode_1 = (cmode >> 1) & 1;
4763 int cmode_0 = cmode & 1;
4764 int q = instr->NEONQ();
4765 int op_bit = instr->NEONModImmOp();
4766 uint64_t imm8 = instr->ImmNEONabcdefgh();
4767
4768 // Find the format and immediate value
4769 uint64_t imm = 0;
4770 VectorFormat vform = kFormatUndefined;
4771 switch (cmode_3_1) {
4772 case 0x0:
4773 case 0x1:
4774 case 0x2:
4775 case 0x3:
4776 vform = (q == 1) ? kFormat4S : kFormat2S;
4777 imm = imm8 << (8 * cmode_3_1);
4778 break;
4779 case 0x4:
4780 case 0x5:
4781 vform = (q == 1) ? kFormat8H : kFormat4H;
4782 imm = imm8 << (8 * cmode_1);
4783 break;
4784 case 0x6:
4785 vform = (q == 1) ? kFormat4S : kFormat2S;
4786 if (cmode_0 == 0) {
4787 imm = imm8 << 8 | 0x000000FF;
4788 } else {
4789 imm = imm8 << 16 | 0x0000FFFF;
4790 }
4791 break;
4792 case 0x7:
4793 if (cmode_0 == 0 && op_bit == 0) {
4794 vform = q ? kFormat16B : kFormat8B;
4795 imm = imm8;
4796 } else if (cmode_0 == 0 && op_bit == 1) {
4797 vform = q ? kFormat2D : kFormat1D;
4798 imm = 0;
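        // Expand each set bit of imm8 into a full 0xFF byte of the 64-bit
        // immediate.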
4799 for (int i = 0; i < 8; ++i) {
4800 if (imm8 & (1 << i)) {
4801 imm |= (UINT64_C(0xFF) << (8 * i));
4802 }
4803 }
4804 } else { // cmode_0 == 1, cmode == 0xF.
4805 if (op_bit == 0) {
4806 vform = q ? kFormat4S : kFormat2S;
4807 imm = bit_cast<uint32_t>(instr->ImmNEONFP32());
4808 } else if (q == 1) {
4809 vform = kFormat2D;
4810 imm = bit_cast<uint64_t>(instr->ImmNEONFP64());
4811 } else {
4812 DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xF));
4813 VisitUnallocated(instr);
4814 }
4815 }
4816 break;
4817 default:
4818 UNREACHABLE();
4819 }
4820
4821 // Find the operation.
4822 NEONModifiedImmediateOp op;
4823 if (cmode_3 == 0) {
4824 if (cmode_0 == 0) {
4825 op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
4826 } else { // cmode<0> == '1'
4827 op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
4828 }
4829 } else { // cmode<3> == '1'
4830 if (cmode_2 == 0) {
4831 if (cmode_0 == 0) {
4832 op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
4833 } else { // cmode<0> == '1'
4834 op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
4835 }
4836 } else { // cmode<2> == '1'
4837 if (cmode_1 == 0) {
4838 op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
4839 } else { // cmode<1> == '1'
4840 if (cmode_0 == 0) {
4841 op = NEONModifiedImmediate_MOVI;
4842 } else { // cmode<0> == '1'
4843 op = NEONModifiedImmediate_MOVI;
4844 }
4845 }
4846 }
4847 }
4848
4849 // Call the logic function.
4850 switch (op) {
4851 case NEONModifiedImmediate_ORR:
4852 orr(vform, rd, rd, imm);
4853 break;
4854 case NEONModifiedImmediate_BIC:
4855 bic(vform, rd, rd, imm);
4856 break;
4857 case NEONModifiedImmediate_MOVI:
4858 movi(vform, rd, imm);
4859 break;
4860 case NEONModifiedImmediate_MVNI:
4861 mvni(vform, rd, imm);
4862 break;
4863 default:
4864 VisitUnimplemented(instr);
4865 }
4866 }
4867
4868 void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
4869 NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
4870 VectorFormat vf = nfd.GetVectorFormat();
4871
4872 SimVRegister& rd = vreg(instr->Rd());
4873 SimVRegister& rn = vreg(instr->Rn());
4874
4875 if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
4876 // These instructions all use a two bit size field, except NOT and RBIT,
4877 // which use the field to encode the operation.
4878 switch (instr->Mask(NEONScalar2RegMiscMask)) {
4879 case NEON_CMEQ_zero_scalar:
4880 cmp(vf, rd, rn, 0, eq);
4881 break;
4882 case NEON_CMGE_zero_scalar:
4883 cmp(vf, rd, rn, 0, ge);
4884 break;
4885 case NEON_CMGT_zero_scalar:
4886 cmp(vf, rd, rn, 0, gt);
4887 break;
4888 case NEON_CMLT_zero_scalar:
4889 cmp(vf, rd, rn, 0, lt);
4890 break;
4891 case NEON_CMLE_zero_scalar:
4892 cmp(vf, rd, rn, 0, le);
4893 break;
4894 case NEON_ABS_scalar:
4895 abs(vf, rd, rn);
4896 break;
4897 case NEON_SQABS_scalar:
4898 abs(vf, rd, rn).SignedSaturate(vf);
4899 break;
4900 case NEON_NEG_scalar:
4901 neg(vf, rd, rn);
4902 break;
4903 case NEON_SQNEG_scalar:
4904 neg(vf, rd, rn).SignedSaturate(vf);
4905 break;
4906 case NEON_SUQADD_scalar:
4907 suqadd(vf, rd, rn);
4908 break;
4909 case NEON_USQADD_scalar:
4910 usqadd(vf, rd, rn);
4911 break;
4912 default:
4913 UNIMPLEMENTED();
4914 break;
4915 }
4916 } else {
4917 VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
4918 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
4919
4920 // These instructions all use a one bit size field, except SQXTUN, SQXTN
4921 // and UQXTN, which use a two bit size field.
4922 switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
4923 case NEON_FRECPE_scalar:
4924 frecpe(fpf, rd, rn, fpcr_rounding);
4925 break;
4926 case NEON_FRECPX_scalar:
4927 frecpx(fpf, rd, rn);
4928 break;
4929 case NEON_FRSQRTE_scalar:
4930 frsqrte(fpf, rd, rn);
4931 break;
4932 case NEON_FCMGT_zero_scalar:
4933 fcmp_zero(fpf, rd, rn, gt);
4934 break;
4935 case NEON_FCMGE_zero_scalar:
4936 fcmp_zero(fpf, rd, rn, ge);
4937 break;
4938 case NEON_FCMEQ_zero_scalar:
4939 fcmp_zero(fpf, rd, rn, eq);
4940 break;
4941 case NEON_FCMLE_zero_scalar:
4942 fcmp_zero(fpf, rd, rn, le);
4943 break;
4944 case NEON_FCMLT_zero_scalar:
4945 fcmp_zero(fpf, rd, rn, lt);
4946 break;
4947 case NEON_SCVTF_scalar:
4948 scvtf(fpf, rd, rn, 0, fpcr_rounding);
4949 break;
4950 case NEON_UCVTF_scalar:
4951 ucvtf(fpf, rd, rn, 0, fpcr_rounding);
4952 break;
4953 case NEON_FCVTNS_scalar:
4954 fcvts(fpf, rd, rn, FPTieEven);
4955 break;
4956 case NEON_FCVTNU_scalar:
4957 fcvtu(fpf, rd, rn, FPTieEven);
4958 break;
4959 case NEON_FCVTPS_scalar:
4960 fcvts(fpf, rd, rn, FPPositiveInfinity);
4961 break;
4962 case NEON_FCVTPU_scalar:
4963 fcvtu(fpf, rd, rn, FPPositiveInfinity);
4964 break;
4965 case NEON_FCVTMS_scalar:
4966 fcvts(fpf, rd, rn, FPNegativeInfinity);
4967 break;
4968 case NEON_FCVTMU_scalar:
4969 fcvtu(fpf, rd, rn, FPNegativeInfinity);
4970 break;
4971 case NEON_FCVTZS_scalar:
4972 fcvts(fpf, rd, rn, FPZero);
4973 break;
4974 case NEON_FCVTZU_scalar:
4975 fcvtu(fpf, rd, rn, FPZero);
4976 break;
4977 case NEON_FCVTAS_scalar:
4978 fcvts(fpf, rd, rn, FPTieAway);
4979 break;
4980 case NEON_FCVTAU_scalar:
4981 fcvtu(fpf, rd, rn, FPTieAway);
4982 break;
4983 case NEON_FCVTXN_scalar:
4984 // Unlike all of the other FP instructions above, fcvtxn encodes dest
4985 // size S as size<0>=1. There's only one case, so we ignore the form.
4986 DCHECK_EQ(instr->Bit(22), 1);
4987 fcvtxn(kFormatS, rd, rn);
4988 break;
4989 default:
4990 switch (instr->Mask(NEONScalar2RegMiscMask)) {
4991 case NEON_SQXTN_scalar:
4992 sqxtn(vf, rd, rn);
4993 break;
4994 case NEON_UQXTN_scalar:
4995 uqxtn(vf, rd, rn);
4996 break;
4997 case NEON_SQXTUN_scalar:
4998 sqxtun(vf, rd, rn);
4999 break;
5000 default:
5001 UNIMPLEMENTED();
5002 }
5003 }
5004 }
5005 }
5006
5007 void Simulator::VisitNEONScalar3Diff(Instruction* instr) {
5008 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
5009 VectorFormat vf = nfd.GetVectorFormat();
5010
5011 SimVRegister& rd = vreg(instr->Rd());
5012 SimVRegister& rn = vreg(instr->Rn());
5013 SimVRegister& rm = vreg(instr->Rm());
5014 switch (instr->Mask(NEONScalar3DiffMask)) {
5015 case NEON_SQDMLAL_scalar:
5016 sqdmlal(vf, rd, rn, rm);
5017 break;
5018 case NEON_SQDMLSL_scalar:
5019 sqdmlsl(vf, rd, rn, rm);
5020 break;
5021 case NEON_SQDMULL_scalar:
5022 sqdmull(vf, rd, rn, rm);
5023 break;
5024 default:
5025 UNIMPLEMENTED();
5026 }
5027 }
5028
5029 void Simulator::VisitNEONScalar3Same(Instruction* instr) {
5030 NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
5031 VectorFormat vf = nfd.GetVectorFormat();
5032
5033 SimVRegister& rd = vreg(instr->Rd());
5034 SimVRegister& rn = vreg(instr->Rn());
5035 SimVRegister& rm = vreg(instr->Rm());
5036
5037 if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
5038 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
5039 switch (instr->Mask(NEONScalar3SameFPMask)) {
5040 case NEON_FMULX_scalar:
5041 fmulx(vf, rd, rn, rm);
5042 break;
5043 case NEON_FACGE_scalar:
5044 fabscmp(vf, rd, rn, rm, ge);
5045 break;
5046 case NEON_FACGT_scalar:
5047 fabscmp(vf, rd, rn, rm, gt);
5048 break;
5049 case NEON_FCMEQ_scalar:
5050 fcmp(vf, rd, rn, rm, eq);
5051 break;
5052 case NEON_FCMGE_scalar:
5053 fcmp(vf, rd, rn, rm, ge);
5054 break;
5055 case NEON_FCMGT_scalar:
5056 fcmp(vf, rd, rn, rm, gt);
5057 break;
5058 case NEON_FRECPS_scalar:
5059 frecps(vf, rd, rn, rm);
5060 break;
5061 case NEON_FRSQRTS_scalar:
5062 frsqrts(vf, rd, rn, rm);
5063 break;
5064 case NEON_FABD_scalar:
5065 fabd(vf, rd, rn, rm);
5066 break;
5067 default:
5068 UNIMPLEMENTED();
5069 }
5070 } else {
5071 switch (instr->Mask(NEONScalar3SameMask)) {
5072 case NEON_ADD_scalar:
5073 add(vf, rd, rn, rm);
5074 break;
5075 case NEON_SUB_scalar:
5076 sub(vf, rd, rn, rm);
5077 break;
5078 case NEON_CMEQ_scalar:
5079 cmp(vf, rd, rn, rm, eq);
5080 break;
5081 case NEON_CMGE_scalar:
5082 cmp(vf, rd, rn, rm, ge);
5083 break;
5084 case NEON_CMGT_scalar:
5085 cmp(vf, rd, rn, rm, gt);
5086 break;
5087 case NEON_CMHI_scalar:
5088 cmp(vf, rd, rn, rm, hi);
5089 break;
5090 case NEON_CMHS_scalar:
5091 cmp(vf, rd, rn, rm, hs);
5092 break;
5093 case NEON_CMTST_scalar:
5094 cmptst(vf, rd, rn, rm);
5095 break;
5096 case NEON_USHL_scalar:
5097 ushl(vf, rd, rn, rm);
5098 break;
5099 case NEON_SSHL_scalar:
5100 sshl(vf, rd, rn, rm);
5101 break;
5102 case NEON_SQDMULH_scalar:
5103 sqdmulh(vf, rd, rn, rm);
5104 break;
5105 case NEON_SQRDMULH_scalar:
5106 sqrdmulh(vf, rd, rn, rm);
5107 break;
5108 case NEON_UQADD_scalar:
5109 add(vf, rd, rn, rm).UnsignedSaturate(vf);
5110 break;
5111 case NEON_SQADD_scalar:
5112 add(vf, rd, rn, rm).SignedSaturate(vf);
5113 break;
5114 case NEON_UQSUB_scalar:
5115 sub(vf, rd, rn, rm).UnsignedSaturate(vf);
5116 break;
5117 case NEON_SQSUB_scalar:
5118 sub(vf, rd, rn, rm).SignedSaturate(vf);
5119 break;
5120 case NEON_UQSHL_scalar:
5121 ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
5122 break;
5123 case NEON_SQSHL_scalar:
5124 sshl(vf, rd, rn, rm).SignedSaturate(vf);
5125 break;
5126 case NEON_URSHL_scalar:
5127 ushl(vf, rd, rn, rm).Round(vf);
5128 break;
5129 case NEON_SRSHL_scalar:
5130 sshl(vf, rd, rn, rm).Round(vf);
5131 break;
5132 case NEON_UQRSHL_scalar:
5133 ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
5134 break;
5135 case NEON_SQRSHL_scalar:
5136 sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
5137 break;
5138 default:
5139 UNIMPLEMENTED();
5140 }
5141 }
5142 }
5143
5144 void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
5145 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
5146 VectorFormat vf = nfd.GetVectorFormat();
5147 VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
5148
5149 SimVRegister& rd = vreg(instr->Rd());
5150 SimVRegister& rn = vreg(instr->Rn());
5151 ByElementOp Op = nullptr;
5152
5153 int rm_reg = instr->Rm();
5154 int index = (instr->NEONH() << 1) | instr->NEONL();
5155 if (instr->NEONSize() == 1) {
5156 rm_reg &= 0xF;
5157 index = (index << 1) | instr->NEONM();
5158 }
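  // The element index is decoded as in VisitNEONByIndexedElement above.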
5159
5160 switch (instr->Mask(NEONScalarByIndexedElementMask)) {
5161 case NEON_SQDMULL_byelement_scalar:
5162 Op = &Simulator::sqdmull;
5163 break;
5164 case NEON_SQDMLAL_byelement_scalar:
5165 Op = &Simulator::sqdmlal;
5166 break;
5167 case NEON_SQDMLSL_byelement_scalar:
5168 Op = &Simulator::sqdmlsl;
5169 break;
5170 case NEON_SQDMULH_byelement_scalar:
5171 Op = &Simulator::sqdmulh;
5172 vf = vf_r;
5173 break;
5174 case NEON_SQRDMULH_byelement_scalar:
5175 Op = &Simulator::sqrdmulh;
5176 vf = vf_r;
5177 break;
5178 default:
5179 vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
5180 index = instr->NEONH();
5181 if ((instr->FPType() & 1) == 0) {
5182 index = (index << 1) | instr->NEONL();
5183 }
5184 switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
5185 case NEON_FMUL_byelement_scalar:
5186 Op = &Simulator::fmul;
5187 break;
5188 case NEON_FMLA_byelement_scalar:
5189 Op = &Simulator::fmla;
5190 break;
5191 case NEON_FMLS_byelement_scalar:
5192 Op = &Simulator::fmls;
5193 break;
5194 case NEON_FMULX_byelement_scalar:
5195 Op = &Simulator::fmulx;
5196 break;
5197 default:
5198 UNIMPLEMENTED();
5199 }
5200 }
5201
5202 (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
5203 }
5204
5205 void Simulator::VisitNEONScalarCopy(Instruction* instr) {
5206 NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
5207 VectorFormat vf = nfd.GetVectorFormat();
5208
5209 SimVRegister& rd = vreg(instr->Rd());
5210 SimVRegister& rn = vreg(instr->Rn());
5211
5212 if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
5213 int imm5 = instr->ImmNEON5();
5214 int lsb = LowestSetBitPosition(imm5);
5215 int rn_index = imm5 >> lsb;
5216 dup_element(vf, rd, rn, rn_index);
5217 } else {
5218 UNIMPLEMENTED();
5219 }
5220 }
5221
5222 void Simulator::VisitNEONScalarPairwise(Instruction* instr) {
5223 NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
5224 VectorFormat vf = nfd.GetVectorFormat();
5225
5226 SimVRegister& rd = vreg(instr->Rd());
5227 SimVRegister& rn = vreg(instr->Rn());
5228 switch (instr->Mask(NEONScalarPairwiseMask)) {
5229 case NEON_ADDP_scalar:
5230 addp(vf, rd, rn);
5231 break;
5232 case NEON_FADDP_scalar:
5233 faddp(vf, rd, rn);
5234 break;
5235 case NEON_FMAXP_scalar:
5236 fmaxp(vf, rd, rn);
5237 break;
5238 case NEON_FMAXNMP_scalar:
5239 fmaxnmp(vf, rd, rn);
5240 break;
5241 case NEON_FMINP_scalar:
5242 fminp(vf, rd, rn);
5243 break;
5244 case NEON_FMINNMP_scalar:
5245 fminnmp(vf, rd, rn);
5246 break;
5247 default:
5248 UNIMPLEMENTED();
5249 }
5250 }
5251
5252 void Simulator::VisitNEONScalarShiftImmediate(Instruction* instr) {
5253 SimVRegister& rd = vreg(instr->Rd());
5254 SimVRegister& rn = vreg(instr->Rn());
5255 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
5256
5257 static const NEONFormatMap map = {
5258 {22, 21, 20, 19},
5259 {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, NF_D, NF_D, NF_D,
5260 NF_D, NF_D, NF_D, NF_D, NF_D}};
5261 NEONFormatDecoder nfd(instr, &map);
5262 VectorFormat vf = nfd.GetVectorFormat();
5263
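  // The shift amount is encoded in immh:immb relative to the element size
  // (esize = 8 << HighestSetBit(immh)): right shifts as (2 * esize - shift),
  // left shifts as (esize + shift).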
5264 int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
5265 int immhimmb = instr->ImmNEONImmhImmb();
5266 int right_shift = (16 << highestSetBit) - immhimmb;
5267 int left_shift = immhimmb - (8 << highestSetBit);
5268 switch (instr->Mask(NEONScalarShiftImmediateMask)) {
5269 case NEON_SHL_scalar:
5270 shl(vf, rd, rn, left_shift);
5271 break;
5272 case NEON_SLI_scalar:
5273 sli(vf, rd, rn, left_shift);
5274 break;
5275 case NEON_SQSHL_imm_scalar:
5276 sqshl(vf, rd, rn, left_shift);
5277 break;
5278 case NEON_UQSHL_imm_scalar:
5279 uqshl(vf, rd, rn, left_shift);
5280 break;
5281 case NEON_SQSHLU_scalar:
5282 sqshlu(vf, rd, rn, left_shift);
5283 break;
5284 case NEON_SRI_scalar:
5285 sri(vf, rd, rn, right_shift);
5286 break;
5287 case NEON_SSHR_scalar:
5288 sshr(vf, rd, rn, right_shift);
5289 break;
5290 case NEON_USHR_scalar:
5291 ushr(vf, rd, rn, right_shift);
5292 break;
5293 case NEON_SRSHR_scalar:
5294 sshr(vf, rd, rn, right_shift).Round(vf);
5295 break;
5296 case NEON_URSHR_scalar:
5297 ushr(vf, rd, rn, right_shift).Round(vf);
5298 break;
5299 case NEON_SSRA_scalar:
5300 ssra(vf, rd, rn, right_shift);
5301 break;
5302 case NEON_USRA_scalar:
5303 usra(vf, rd, rn, right_shift);
5304 break;
5305 case NEON_SRSRA_scalar:
5306 srsra(vf, rd, rn, right_shift);
5307 break;
5308 case NEON_URSRA_scalar:
5309 ursra(vf, rd, rn, right_shift);
5310 break;
5311 case NEON_UQSHRN_scalar:
5312 uqshrn(vf, rd, rn, right_shift);
5313 break;
5314 case NEON_UQRSHRN_scalar:
5315 uqrshrn(vf, rd, rn, right_shift);
5316 break;
5317 case NEON_SQSHRN_scalar:
5318 sqshrn(vf, rd, rn, right_shift);
5319 break;
5320 case NEON_SQRSHRN_scalar:
5321 sqrshrn(vf, rd, rn, right_shift);
5322 break;
5323 case NEON_SQSHRUN_scalar:
5324 sqshrun(vf, rd, rn, right_shift);
5325 break;
5326 case NEON_SQRSHRUN_scalar:
5327 sqrshrun(vf, rd, rn, right_shift);
5328 break;
5329 case NEON_FCVTZS_imm_scalar:
5330 fcvts(vf, rd, rn, FPZero, right_shift);
5331 break;
5332 case NEON_FCVTZU_imm_scalar:
5333 fcvtu(vf, rd, rn, FPZero, right_shift);
5334 break;
5335 case NEON_SCVTF_imm_scalar:
5336 scvtf(vf, rd, rn, right_shift, fpcr_rounding);
5337 break;
5338 case NEON_UCVTF_imm_scalar:
5339 ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
5340 break;
5341 default:
5342 UNIMPLEMENTED();
5343 }
5344 }
5345
5346 void Simulator::VisitNEONShiftImmediate(Instruction* instr) {
5347 SimVRegister& rd = vreg(instr->Rd());
5348 SimVRegister& rn = vreg(instr->Rn());
5349 FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
5350
5351 // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
5352 // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
5353 static const NEONFormatMap map = {
5354 {22, 21, 20, 19, 30},
5355 {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
5356 NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
5357 NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
5358 NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}};
5359 NEONFormatDecoder nfd(instr, &map);
5360 VectorFormat vf = nfd.GetVectorFormat();
5361
5362 // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
5363 static const NEONFormatMap map_l = {
5364 {22, 21, 20, 19},
5365 {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}};
5366 VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
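// vf_l is the long (double-width) destination format used by the widening
// shifts (SSHLL/USHLL) handled below.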
5367
5368 int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
5369 int immhimmb = instr->ImmNEONImmhImmb();
5370 int right_shift = (16 << highestSetBit) - immhimmb;
5371 int left_shift = immhimmb - (8 << highestSetBit);
5372
5373 switch (instr->Mask(NEONShiftImmediateMask)) {
5374 case NEON_SHL:
5375 shl(vf, rd, rn, left_shift);
5376 break;
5377 case NEON_SLI:
5378 sli(vf, rd, rn, left_shift);
5379 break;
5380 case NEON_SQSHLU:
5381 sqshlu(vf, rd, rn, left_shift);
5382 break;
5383 case NEON_SRI:
5384 sri(vf, rd, rn, right_shift);
5385 break;
5386 case NEON_SSHR:
5387 sshr(vf, rd, rn, right_shift);
5388 break;
5389 case NEON_USHR:
5390 ushr(vf, rd, rn, right_shift);
5391 break;
5392 case NEON_SRSHR:
5393 sshr(vf, rd, rn, right_shift).Round(vf);
5394 break;
5395 case NEON_URSHR:
5396 ushr(vf, rd, rn, right_shift).Round(vf);
5397 break;
5398 case NEON_SSRA:
5399 ssra(vf, rd, rn, right_shift);
5400 break;
5401 case NEON_USRA:
5402 usra(vf, rd, rn, right_shift);
5403 break;
5404 case NEON_SRSRA:
5405 srsra(vf, rd, rn, right_shift);
5406 break;
5407 case NEON_URSRA:
5408 ursra(vf, rd, rn, right_shift);
5409 break;
5410 case NEON_SQSHL_imm:
5411 sqshl(vf, rd, rn, left_shift);
5412 break;
5413 case NEON_UQSHL_imm:
5414 uqshl(vf, rd, rn, left_shift);
5415 break;
5416 case NEON_SCVTF_imm:
5417 scvtf(vf, rd, rn, right_shift, fpcr_rounding);
5418 break;
5419 case NEON_UCVTF_imm:
5420 ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
5421 break;
5422 case NEON_FCVTZS_imm:
5423 fcvts(vf, rd, rn, FPZero, right_shift);
5424 break;
5425 case NEON_FCVTZU_imm:
5426 fcvtu(vf, rd, rn, FPZero, right_shift);
5427 break;
5428 case NEON_SSHLL:
5429 vf = vf_l;
5430 if (instr->Mask(NEON_Q)) {
5431 sshll2(vf, rd, rn, left_shift);
5432 } else {
5433 sshll(vf, rd, rn, left_shift);
5434 }
5435 break;
5436 case NEON_USHLL:
5437 vf = vf_l;
5438 if (instr->Mask(NEON_Q)) {
5439 ushll2(vf, rd, rn, left_shift);
5440 } else {
5441 ushll(vf, rd, rn, left_shift);
5442 }
5443 break;
5444 case NEON_SHRN:
5445 if (instr->Mask(NEON_Q)) {
5446 shrn2(vf, rd, rn, right_shift);
5447 } else {
5448 shrn(vf, rd, rn, right_shift);
5449 }
5450 break;
5451 case NEON_RSHRN:
5452 if (instr->Mask(NEON_Q)) {
5453 rshrn2(vf, rd, rn, right_shift);
5454 } else {
5455 rshrn(vf, rd, rn, right_shift);
5456 }
5457 break;
5458 case NEON_UQSHRN:
5459 if (instr->Mask(NEON_Q)) {
5460 uqshrn2(vf, rd, rn, right_shift);
5461 } else {
5462 uqshrn(vf, rd, rn, right_shift);
5463 }
5464 break;
5465 case NEON_UQRSHRN:
5466 if (instr->Mask(NEON_Q)) {
5467 uqrshrn2(vf, rd, rn, right_shift);
5468 } else {
5469 uqrshrn(vf, rd, rn, right_shift);
5470 }
5471 break;
5472 case NEON_SQSHRN:
5473 if (instr->Mask(NEON_Q)) {
5474 sqshrn2(vf, rd, rn, right_shift);
5475 } else {
5476 sqshrn(vf, rd, rn, right_shift);
5477 }
5478 break;
5479 case NEON_SQRSHRN:
5480 if (instr->Mask(NEON_Q)) {
5481 sqrshrn2(vf, rd, rn, right_shift);
5482 } else {
5483 sqrshrn(vf, rd, rn, right_shift);
5484 }
5485 break;
5486 case NEON_SQSHRUN:
5487 if (instr->Mask(NEON_Q)) {
5488 sqshrun2(vf, rd, rn, right_shift);
5489 } else {
5490 sqshrun(vf, rd, rn, right_shift);
5491 }
5492 break;
5493 case NEON_SQRSHRUN:
5494 if (instr->Mask(NEON_Q)) {
5495 sqrshrun2(vf, rd, rn, right_shift);
5496 } else {
5497 sqrshrun(vf, rd, rn, right_shift);
5498 }
5499 break;
5500 default:
5501 UNIMPLEMENTED();
5502 }
5503 }
5504
5505 void Simulator::VisitNEONTable(Instruction* instr) {
5506 NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
5507 VectorFormat vf = nfd.GetVectorFormat();
5508
5509 SimVRegister& rd = vreg(instr->Rd());
5510 SimVRegister& rn = vreg(instr->Rn());
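// TBL and TBX read up to four consecutive table registers starting at Rn,
// wrapping around the end of the vector register file.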
5511 SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
5512 SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
5513 SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
5514 SimVRegister& rm = vreg(instr->Rm());
5515
5516 switch (instr->Mask(NEONTableMask)) {
5517 case NEON_TBL_1v:
5518 tbl(vf, rd, rn, rm);
5519 break;
5520 case NEON_TBL_2v:
5521 tbl(vf, rd, rn, rn2, rm);
5522 break;
5523 case NEON_TBL_3v:
5524 tbl(vf, rd, rn, rn2, rn3, rm);
5525 break;
5526 case NEON_TBL_4v:
5527 tbl(vf, rd, rn, rn2, rn3, rn4, rm);
5528 break;
5529 case NEON_TBX_1v:
5530 tbx(vf, rd, rn, rm);
5531 break;
5532 case NEON_TBX_2v:
5533 tbx(vf, rd, rn, rn2, rm);
5534 break;
5535 case NEON_TBX_3v:
5536 tbx(vf, rd, rn, rn2, rn3, rm);
5537 break;
5538 case NEON_TBX_4v:
5539 tbx(vf, rd, rn, rn2, rn3, rn4, rm);
5540 break;
5541 default:
5542 UNIMPLEMENTED();
5543 }
5544 }
5545
5546 void Simulator::VisitNEONPerm(Instruction* instr) {
5547 NEONFormatDecoder nfd(instr);
5548 VectorFormat vf = nfd.GetVectorFormat();
5549
5550 SimVRegister& rd = vreg(instr->Rd());
5551 SimVRegister& rn = vreg(instr->Rn());
5552 SimVRegister& rm = vreg(instr->Rm());
5553
5554 switch (instr->Mask(NEONPermMask)) {
5555 case NEON_TRN1:
5556 trn1(vf, rd, rn, rm);
5557 break;
5558 case NEON_TRN2:
5559 trn2(vf, rd, rn, rm);
5560 break;
5561 case NEON_UZP1:
5562 uzp1(vf, rd, rn, rm);
5563 break;
5564 case NEON_UZP2:
5565 uzp2(vf, rd, rn, rm);
5566 break;
5567 case NEON_ZIP1:
5568 zip1(vf, rd, rn, rm);
5569 break;
5570 case NEON_ZIP2:
5571 zip2(vf, rd, rn, rm);
5572 break;
5573 default:
5574 UNIMPLEMENTED();
5575 }
5576 }
5577
5578 void Simulator::DoPrintf(Instruction* instr) {
5579 DCHECK((instr->Mask(ExceptionMask) == HLT) &&
5580 (instr->ImmException() == kImmExceptionIsPrintf));
5581
5582 // Read the arguments encoded inline in the instruction stream.
5583 uint32_t arg_count;
5584 uint32_t arg_pattern_list;
5585 STATIC_ASSERT(sizeof(*instr) == 1);
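// sizeof(Instruction) is 1, so 'instr + offset' below is byte-granular
// pointer arithmetic into the printf arguments inlined after this
// instruction (they are skipped via kPrintfLength at the end).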
5586 memcpy(&arg_count,
5587 instr + kPrintfArgCountOffset,
5588 sizeof(arg_count));
5589 memcpy(&arg_pattern_list,
5590 instr + kPrintfArgPatternListOffset,
5591 sizeof(arg_pattern_list));
5592
5593 DCHECK_LE(arg_count, kPrintfMaxArgCount);
5594 DCHECK_EQ(arg_pattern_list >> (kPrintfArgPatternBits * arg_count), 0);
5595
5596 // We need to call the host printf function with a set of arguments defined
5597 // by arg_pattern_list. Because we don't know the types and sizes of the
5598 // arguments, there is no robust and portable way to forward them in a
5599 // single call. To work around this, we pick apart the format string and
5600 // print one format placeholder at a time.
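// For example (illustrative): "x: %x, d: %f\n" is copied into 'format' as
// "x: \0%x, d: \0%f\n"; the leading "x: " is printed literally, and
// chunks[0] ("%x, d: ") and chunks[1] ("%f\n") are each printed with
// exactly one register argument.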
5601
5602 // Allocate space for the format string. We take a copy, so we can modify it.
5603 // Leave enough space for one extra character per expected argument (plus the
5604 // '\0' termination).
5605 const char * format_base = reg<const char *>(0);
5606 DCHECK_NOT_NULL(format_base);
5607 size_t length = strlen(format_base) + 1;
5608 char * const format = new char[length + arg_count];
5609
5610 // A list of chunks, each with exactly one format placeholder.
5611 const char * chunks[kPrintfMaxArgCount];
5612
5613 // Copy the format string and search for format placeholders.
5614 uint32_t placeholder_count = 0;
5615 char * format_scratch = format;
5616 for (size_t i = 0; i < length; i++) {
5617 if (format_base[i] != '%') {
5618 *format_scratch++ = format_base[i];
5619 } else {
5620 if (format_base[i + 1] == '%') {
5621 // Ignore explicit "%%" sequences.
5622 *format_scratch++ = format_base[i];
5623
5624 if (placeholder_count == 0) {
5625 // The first chunk is passed to printf using "%s", so we need to
5626 // unescape "%%" sequences in this chunk. (Just skip the next '%'.)
5627 i++;
5628 } else {
5629 // Otherwise, pass through "%%" unchanged.
5630 *format_scratch++ = format_base[++i];
5631 }
5632 } else {
5633 CHECK(placeholder_count < arg_count);
5634 // Insert '\0' before placeholders, and store their locations.
5635 *format_scratch++ = '\0';
5636 chunks[placeholder_count++] = format_scratch;
5637 *format_scratch++ = format_base[i];
5638 }
5639 }
5640 }
5641 DCHECK(format_scratch <= (format + length + arg_count));
5642 CHECK(placeholder_count == arg_count);
5643
5644 // Finally, call printf with each chunk, passing the appropriate register
5645 // argument. Normally, printf returns the number of bytes transmitted, so we
5646 // can emulate a single printf call by adding the result from each chunk. If
5647 // any call returns a negative (error) value, though, just return that value.
5648
5649 fprintf(stream_, "%s", clr_printf);
5650
5651 // Because '\0' is inserted before each placeholder, the first string in
5652 // 'format' contains no format placeholders and should be printed literally.
5653 int result = fprintf(stream_, "%s", format);
5654 int pcs_r = 1; // Start at x1. x0 holds the format string.
5655 int pcs_f = 0; // Start at d0.
5656 if (result >= 0) {
5657 for (uint32_t i = 0; i < placeholder_count; i++) {
5658 int part_result = -1;
5659
5660 uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
5661 arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
5662 switch (arg_pattern) {
5663 case kPrintfArgW:
5664 part_result = fprintf(stream_, chunks[i], wreg(pcs_r++));
5665 break;
5666 case kPrintfArgX:
5667 part_result = fprintf(stream_, chunks[i], xreg(pcs_r++));
5668 break;
5669 case kPrintfArgD:
5670 part_result = fprintf(stream_, chunks[i], dreg(pcs_f++));
5671 break;
5672 default: UNREACHABLE();
5673 }
5674
5675 if (part_result < 0) {
5676 // Handle error values.
5677 result = part_result;
5678 break;
5679 }
5680
5681 result += part_result;
5682 }
5683 }
5684
5685 fprintf(stream_, "%s", clr_normal);
5686
5687 #ifdef DEBUG
5688 CorruptAllCallerSavedCPURegisters();
5689 #endif
5690
5691 // Printf returns its result in x0 (just like the C library's printf).
5692 set_xreg(0, result);
5693
5694 // The printf parameters are inlined in the code, so skip them.
5695 set_pc(instr->InstructionAtOffset(kPrintfLength));
5696
5697 // Set LR as if we'd just called a native printf function.
5698 set_lr(pc());
5699
5700 delete[] format;
5701 }
5702
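// The monitors below model the exclusive-access monitors used by
// load-/store-exclusive instructions: a store-exclusive only succeeds if
// the reservation set up by the matching load-exclusive is still intact.
// The local monitor tracks the simulated processor's own reservation; the
// global monitor coordinates reservations across simulated processors.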
5703 Simulator::LocalMonitor::LocalMonitor()
5704 : access_state_(MonitorAccess::Open),
5705 tagged_addr_(0),
5706 size_(TransactionSize::None) {}
5707
5708 void Simulator::LocalMonitor::Clear() {
5709 access_state_ = MonitorAccess::Open;
5710 tagged_addr_ = 0;
5711 size_ = TransactionSize::None;
5712 }
5713
5714 void Simulator::LocalMonitor::NotifyLoad() {
5715 if (access_state_ == MonitorAccess::Exclusive) {
5716 // A non-exclusive load could clear the local monitor. As a result, it is
5717 // strictest to clear the local monitor unconditionally on every load.
5718 Clear();
5719 }
5720 }
5721
5722 void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
5723 TransactionSize size) {
5724 access_state_ = MonitorAccess::Exclusive;
5725 tagged_addr_ = addr;
5726 size_ = size;
5727 }
5728
5729 void Simulator::LocalMonitor::NotifyStore() {
5730 if (access_state_ == MonitorAccess::Exclusive) {
5731 // A non-exclusive store could clear the local monitor. As a result, it is
5732 // strictest to clear the local monitor unconditionally on every store.
5733 Clear();
5734 }
5735 }
5736
5737 bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr,
5738 TransactionSize size) {
5739 if (access_state_ == MonitorAccess::Exclusive) {
5740 // It is allowed for a processor to require that the address matches
5741 // exactly (B2.10.1), so this comparison does not mask addr.
5742 if (addr == tagged_addr_ && size_ == size) {
5743 Clear();
5744 return true;
5745 } else {
5746 // It is implementation-defined whether an exclusive store to a
5747 // non-tagged address will update memory. As a result, it is strictest
5748 // to clear the local monitor unconditionally.
5749 Clear();
5750 return false;
5751 }
5752 } else {
5753 DCHECK(access_state_ == MonitorAccess::Open);
5754 return false;
5755 }
5756 }
5757
5758 Simulator::GlobalMonitor::Processor::Processor()
5759 : access_state_(MonitorAccess::Open),
5760 tagged_addr_(0),
5761 next_(nullptr),
5762 prev_(nullptr),
5763 failure_counter_(0) {}
5764
5765 void Simulator::GlobalMonitor::Processor::Clear_Locked() {
5766 access_state_ = MonitorAccess::Open;
5767 tagged_addr_ = 0;
5768 }
5769
5770 void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
5771 uintptr_t addr) {
5772 access_state_ = MonitorAccess::Exclusive;
5773 tagged_addr_ = addr;
5774 }
5775
5776 void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
5777 bool is_requesting_processor) {
5778 if (access_state_ == MonitorAccess::Exclusive) {
5779 // A non-exclusive store could clear the global monitor. As a result, it is
5780 // strictest to clear the global monitor unconditionally on every store.
5781 Clear_Locked();
5782 }
5783 }
5784
5785 bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
5786 uintptr_t addr, bool is_requesting_processor) {
5787 if (access_state_ == MonitorAccess::Exclusive) {
5788 if (is_requesting_processor) {
5789 // It is allowed for a processor to require that the address matches
5790 // exactly (B2.10.2), so this comparison does not mask addr.
5791 if (addr == tagged_addr_) {
5792 Clear_Locked();
5793 // Introduce occasional stxr failures. This is to simulate the
5794 // behavior of hardware, which can randomly fail due to background
5795 // cache evictions.
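// With this scheme, roughly one in (kMaxFailureCounter + 1) otherwise
// successful exclusive stores is reported as failed.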
5796 if (failure_counter_++ >= kMaxFailureCounter) {
5797 failure_counter_ = 0;
5798 return false;
5799 } else {
5800 return true;
5801 }
5802 }
5803 } else if ((addr & kExclusiveTaggedAddrMask) ==
5804 (tagged_addr_ & kExclusiveTaggedAddrMask)) {
5805 // When another processor's exclusive store succeeds, compare the masked
5806 // addresses so the implementation is conservative (i.e. the granularity
5807 // of locking is as large as possible).
5808 Clear_Locked();
5809 return false;
5810 }
5811 }
5812 return false;
5813 }
5814
5815 Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
5816
5817 void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
5818 Processor* processor) {
5819 processor->NotifyLoadExcl_Locked(addr);
5820 PrependProcessor_Locked(processor);
5821 }
5822
5823 void Simulator::GlobalMonitor::NotifyStore_Locked(Processor* processor) {
5824 // Notify each processor of the store operation.
5825 for (Processor* iter = head_; iter; iter = iter->next_) {
5826 bool is_requesting_processor = iter == processor;
5827 iter->NotifyStore_Locked(is_requesting_processor);
5828 }
5829 }
5830
5831 bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr,
5832 Processor* processor) {
5833 DCHECK(IsProcessorInLinkedList_Locked(processor));
5834 if (processor->NotifyStoreExcl_Locked(addr, true)) {
5835 // Notify the other processors that this StoreExcl succeeded.
5836 for (Processor* iter = head_; iter; iter = iter->next_) {
5837 if (iter != processor) {
5838 iter->NotifyStoreExcl_Locked(addr, false);
5839 }
5840 }
5841 return true;
5842 } else {
5843 return false;
5844 }
5845 }
5846
5847 bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
5848 Processor* processor) const {
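// A processor is linked if it is the list head or has a neighbour;
// RemoveProcessor clears both links when detaching it.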
5849 return head_ == processor || processor->next_ || processor->prev_;
5850 }
5851
5852 void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
5853 if (IsProcessorInLinkedList_Locked(processor)) {
5854 return;
5855 }
5856
5857 if (head_) {
5858 head_->prev_ = processor;
5859 }
5860 processor->prev_ = nullptr;
5861 processor->next_ = head_;
5862 head_ = processor;
5863 }
5864
5865 void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
5866 base::LockGuard<base::Mutex> lock_guard(&mutex);
5867 if (!IsProcessorInLinkedList_Locked(processor)) {
5868 return;
5869 }
5870
5871 if (processor->prev_) {
5872 processor->prev_->next_ = processor->next_;
5873 } else {
5874 head_ = processor->next_;
5875 }
5876 if (processor->next_) {
5877 processor->next_->prev_ = processor->prev_;
5878 }
5879 processor->prev_ = nullptr;
5880 processor->next_ = nullptr;
5881 }
5882
5883 #endif // USE_SIMULATOR
5884
5885 } // namespace internal
5886 } // namespace v8
5887
5888 #endif // V8_TARGET_ARCH_ARM64
5889