// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/torque/csa-generator.h"

#include "src/common/globals.h"
#include "src/torque/global-context.h"
#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"

namespace v8 {
namespace internal {
namespace torque {

base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
    Stack<std::string> parameters) {
  for (BottomOffset i = {0}; i < parameters.AboveTop(); ++i) {
    SetDefinitionVariable(DefinitionLocation::Parameter(i.offset),
                          parameters.Peek(i));
  }

  for (Block* block : cfg_.blocks()) {
    if (block->IsDead()) continue;

    out() << "  compiler::CodeAssemblerParameterizedLabel<";
    bool first = true;
    DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
    for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
      if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
        if (!first) out() << ", ";
        out() << block->InputTypes().Peek(i)->GetGeneratedTNodeTypeName();
        first = false;
      }
    }
    out() << "> " << BlockName(block) << "(&ca_, compiler::CodeAssemblerLabel::"
          << (block->IsDeferred() ? "kDeferred" : "kNonDeferred") << ");\n";
  }

  EmitInstruction(GotoInstruction{cfg_.start()}, &parameters);
  for (Block* block : cfg_.blocks()) {
    if (cfg_.end() && *cfg_.end() == block) continue;
    if (block->IsDead()) continue;
    out() << "\n";

    // Redirect the output of non-declarations into a buffer and only output
    // declarations right away.
    std::stringstream out_buffer;
    std::ostream* old_out = out_;
    out_ = &out_buffer;

    out() << "  if (" << BlockName(block) << ".is_used()) {\n";
    EmitBlock(block);
    out() << "  }\n";

    // All declarations have been printed now, so we can append the buffered
    // output and redirect back to the original output stream.
    out_ = old_out;
    out() << out_buffer.str();
  }
  if (cfg_.end()) {
    out() << "\n";
    return EmitBlock(*cfg_.end());
  }
  return base::nullopt;
}
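
// For illustration: for a hypothetical live block "block7" whose only phi
// input is a Smi, the label-declaration loop in EmitGraph above emits roughly
//
//   compiler::CodeAssemblerParameterizedLabel<Smi> block7(
//       &ca_, compiler::CodeAssemblerLabel::kNonDeferred);
//
// Each block body is then emitted behind an is_used() guard, so unreachable
// blocks contribute no generated code.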

Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
  Stack<std::string> stack;
  std::stringstream phi_names;

  for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
    const auto& def = block->InputDefinitions().Peek(i);
    stack.Push(DefinitionToVariable(def));
    if (def.IsPhiFromBlock(block)) {
      decls() << "  TNode<"
              << block->InputTypes().Peek(i)->GetGeneratedTNodeTypeName()
              << "> " << stack.Top() << ";\n";
      phi_names << ", &" << stack.Top();
    }
  }
  out() << "    ca_.Bind(&" << BlockName(block) << phi_names.str() << ");\n";

  for (const Instruction& instruction : block->instructions()) {
    TorqueCodeGenerator::EmitInstruction(instruction, &stack);
  }
  return stack;
}
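
// Sketch of EmitBlock's output for a hypothetical block "block7" with one
// Smi phi lowered to "phi_bb7_0": the declaration goes to decls() while the
// bind goes to out(), roughly
//
//   TNode<Smi> phi_bb7_0;          // via decls()
//   ca_.Bind(&block7, &phi_bb7_0); // via out()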

void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
  const std::string& file = SourceFileMap::AbsolutePath(pos.source);
  if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
    // Lines in Torque SourcePositions are zero-based, while the
    // CodeStubAssembler and downwind systems are one-based.
    out() << "    ca_.SetSourcePosition(\"" << file << "\", "
          << (pos.start.line + 1) << ");\n";
    previous_position_ = pos;
  }
}
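
// Example of the emitted call for a hypothetical position at zero-based line
// 41 of "src/builtins/foo.tq":
//
//   ca_.SetSourcePosition("src/builtins/foo.tq", 42);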

void CSAGenerator::EmitInstruction(
    const PushUninitializedInstruction& instruction,
    Stack<std::string>* stack) {
  // TODO(turbofan): This can trigger an error in CSA if it is used. Instead, we
  // should prevent usage of uninitialized values in the type system. This
  // requires "if constexpr" being evaluated at Torque time.
  const std::string str = "ca_.Uninitialized<" +
                          instruction.type->GetGeneratedTNodeTypeName() + ">()";
  stack->Push(str);
  SetDefinitionVariable(instruction.GetValueDefinition(), str);
}

void CSAGenerator::EmitInstruction(
    const PushBuiltinPointerInstruction& instruction,
    Stack<std::string>* stack) {
  const std::string str =
      "ca_.UncheckedCast<BuiltinPtr>(ca_.SmiConstant(Builtin::k" +
      instruction.external_name + "))";
  stack->Push(str);
  SetDefinitionVariable(instruction.GetValueDefinition(), str);
}
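
// For the builtin ArrayPush, for example, the string pushed onto the stack
// (and later spliced into the generated code) would be
//
//   ca_.UncheckedCast<BuiltinPtr>(ca_.SmiConstant(Builtin::kArrayPush))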

void CSAGenerator::EmitInstruction(
    const NamespaceConstantInstruction& instruction,
    Stack<std::string>* stack) {
  const Type* type = instruction.constant->type();
  std::vector<std::string> results;

  const auto lowered = LowerType(type);
  for (std::size_t i = 0; i < lowered.size(); ++i) {
    results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
    stack->Push(results.back());
    decls() << "  TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
            << stack->Top() << ";\n";
  }

  out() << "    ";
  if (type->StructSupertype()) {
    out() << "std::tie(";
    PrintCommaSeparatedList(out(), results);
    out() << ") = ";
  } else if (results.size() == 1) {
    out() << results[0] << " = ";
  }
  out() << instruction.constant->external_name() << "(state_)";
  if (type->StructSupertype()) {
    out() << ".Flatten();\n";
  } else {
    out() << ";\n";
  }
}

std::vector<std::string> CSAGenerator::ProcessArgumentsCommon(
    const TypeVector& parameter_types,
    std::vector<std::string> constexpr_arguments, Stack<std::string>* stack) {
  std::vector<std::string> args;
  for (auto it = parameter_types.rbegin(); it != parameter_types.rend(); ++it) {
    const Type* type = *it;
    if (type->IsConstexpr()) {
      args.push_back(std::move(constexpr_arguments.back()));
      constexpr_arguments.pop_back();
    } else {
      std::stringstream s;
      size_t slot_count = LoweredSlotCount(type);
      VisitResult arg = VisitResult(type, stack->TopRange(slot_count));
      EmitCSAValue(arg, *stack, s);
      args.push_back(s.str());
      stack->PopMany(slot_count);
    }
  }
  std::reverse(args.begin(), args.end());
  return args;
}
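
// Worked example with hypothetical values: for parameter types
// (constexpr int31, Smi), constexpr_arguments {"16"}, and "tmp3" on top of
// the stack, the reverse walk above pairs the Smi parameter with the stack
// slot and the constexpr parameter with "16", so after the final
// std::reverse the result is {"16", "TNode<Smi>{tmp3}"}.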

void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
                                   Stack<std::string>* stack) {
  TypeVector parameter_types =
      instruction.intrinsic->signature().parameter_types.types;
  std::vector<std::string> args = ProcessArgumentsCommon(
      parameter_types, instruction.constexpr_arguments, stack);

  Stack<std::string> pre_call_stack = *stack;
  const Type* return_type = instruction.intrinsic->signature().return_type;
  std::vector<std::string> results;

  const auto lowered = LowerType(return_type);
  for (std::size_t i = 0; i < lowered.size(); ++i) {
    results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
    stack->Push(results.back());
    decls() << "  TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
            << stack->Top() << ";\n";
  }

  out() << "    ";
  if (return_type->StructSupertype()) {
    out() << "std::tie(";
    PrintCommaSeparatedList(out(), results);
    out() << ") = ";
  } else {
    if (results.size() == 1) {
      out() << results[0] << " = ";
    }
  }

  if (instruction.intrinsic->ExternalName() == "%RawDownCast") {
    if (parameter_types.size() != 1) {
      ReportError("%RawDownCast must take a single parameter");
    }
    const Type* original_type = parameter_types[0];
    bool is_subtype =
        return_type->IsSubtypeOf(original_type) ||
        (original_type == TypeOracle::GetUninitializedHeapObjectType() &&
         return_type->IsSubtypeOf(TypeOracle::GetHeapObjectType()));
    if (!is_subtype) {
      ReportError("%RawDownCast error: ", *return_type,
                  " is not a subtype of ", *original_type);
    }
    if (!original_type->StructSupertype() &&
        return_type->GetGeneratedTNodeTypeName() !=
            original_type->GetGeneratedTNodeTypeName()) {
      if (return_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
        out() << "TORQUE_CAST";
      } else {
        out() << "ca_.UncheckedCast<"
              << return_type->GetGeneratedTNodeTypeName() << ">";
      }
    }
  } else if (instruction.intrinsic->ExternalName() == "%GetClassMapConstant") {
    if (parameter_types.size() != 0) {
      ReportError("%GetClassMapConstant must not take parameters");
    }
    if (instruction.specialization_types.size() != 1) {
      ReportError(
          "%GetClassMapConstant must take a single class as specialization "
          "parameter");
    }
    const ClassType* class_type =
        ClassType::DynamicCast(instruction.specialization_types[0]);
    if (!class_type) {
      ReportError("%GetClassMapConstant must take a class type parameter");
    }
    // If the class isn't actually used as the parameter to a TNode,
    // then we can't rely on the class existing in C++ or being of the same
    // type (e.g. it could be a template), so don't use the template CSA
    // machinery for accessing the class' map.
    std::string class_name =
        class_type->name() != class_type->GetGeneratedTNodeTypeName()
            ? std::string("void")
            : class_type->name();

    out() << std::string("CodeStubAssembler(state_).GetClassMapConstant<") +
                 class_name + ">";
  } else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
    if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
      ReportError(
          "%FromConstexpr must take a single parameter with constexpr "
          "type");
    }
    if (return_type->IsConstexpr()) {
      ReportError("%FromConstexpr must return a non-constexpr type");
    }
    if (return_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
      out() << "ca_.SmiConstant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetNumberType())) {
      out() << "ca_.NumberConstant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetStringType())) {
      out() << "ca_.StringConstant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetObjectType())) {
      ReportError(
          "%FromConstexpr cannot cast to subclass of HeapObject unless it's a "
          "String or Number");
    } else if (return_type->IsSubtypeOf(TypeOracle::GetIntPtrType())) {
      out() << "ca_.IntPtrConstant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
      out() << "ca_.UintPtrConstant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetInt32Type())) {
      out() << "ca_.Int32Constant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetUint32Type())) {
      out() << "ca_.Uint32Constant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetInt64Type())) {
      out() << "ca_.Int64Constant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetUint64Type())) {
      out() << "ca_.Uint64Constant";
    } else if (return_type->IsSubtypeOf(TypeOracle::GetBoolType())) {
      out() << "ca_.BoolConstant";
    } else {
      std::stringstream s;
      s << "%FromConstexpr does not support return type " << *return_type;
      ReportError(s.str());
    }
    // Wrap the raw constexpr value in a static_cast to ensure that
    // enums get properly cast to their backing integral value.
    out() << "(CastToUnderlyingTypeIfEnum";
  } else {
    ReportError("no built in intrinsic with name " +
                instruction.intrinsic->ExternalName());
  }

  out() << "(";
  PrintCommaSeparatedList(out(), args);
  if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
    out() << ")";
  }
  if (return_type->StructSupertype()) {
    out() << ").Flatten();\n";
  } else {
    out() << ");\n";
  }
}
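
// Putting the %FromConstexpr pieces together: for a hypothetical constexpr
// int31 argument "5" converted to a Smi result "tmp0", the emitted line reads
//
//   tmp0 = ca_.SmiConstant(CastToUnderlyingTypeIfEnum(5));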

void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
                                   Stack<std::string>* stack) {
  TypeVector parameter_types =
      instruction.macro->signature().parameter_types.types;
  std::vector<std::string> args = ProcessArgumentsCommon(
      parameter_types, instruction.constexpr_arguments, stack);

  Stack<std::string> pre_call_stack = *stack;
  const Type* return_type = instruction.macro->signature().return_type;
  std::vector<std::string> results;

  const auto lowered = LowerType(return_type);
  for (std::size_t i = 0; i < lowered.size(); ++i) {
    results.push_back(DefinitionToVariable(instruction.GetValueDefinition(i)));
    stack->Push(results.back());
    decls() << "  TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
            << stack->Top() << ";\n";
  }

  std::string catch_name =
      PreCallableExceptionPreparation(instruction.catch_block);
  out() << "    ";
  bool needs_flattening = return_type->StructSupertype().has_value();
  if (needs_flattening) {
    out() << "std::tie(";
    PrintCommaSeparatedList(out(), results);
    out() << ") = ";
  } else {
    if (results.size() == 1) {
      out() << results[0] << " = ";
    } else {
      DCHECK_EQ(0, results.size());
    }
  }
  if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
    out() << extern_macro->external_assembler_name() << "(state_).";
  } else {
    args.insert(args.begin(), "state_");
  }
  out() << instruction.macro->ExternalName() << "(";
  PrintCommaSeparatedList(out(), args);
  if (needs_flattening) {
    out() << ").Flatten();\n";
  } else {
    out() << ");\n";
  }
  PostCallableExceptionPreparation(catch_name, return_type,
                                   instruction.catch_block, &pre_call_stack,
                                   instruction.GetExceptionObjectDefinition());
}
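
// Example output for the extern macro LoadMap (which lives on
// CodeStubAssembler), applied to a hypothetical HeapObject value "tmp0" with
// result "tmp1":
//
//   tmp1 = CodeStubAssembler(state_).LoadMap(TNode<HeapObject>{tmp0});
//
// A Torque-defined macro is instead called as a free function with "state_"
// prepended to its argument list.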

void CSAGenerator::EmitInstruction(
    const CallCsaMacroAndBranchInstruction& instruction,
    Stack<std::string>* stack) {
  TypeVector parameter_types =
      instruction.macro->signature().parameter_types.types;
  std::vector<std::string> args = ProcessArgumentsCommon(
      parameter_types, instruction.constexpr_arguments, stack);

  Stack<std::string> pre_call_stack = *stack;
  std::vector<std::string> results;
  const Type* return_type = instruction.macro->signature().return_type;

  if (return_type != TypeOracle::GetNeverType()) {
    const auto lowered = LowerType(return_type);
    for (std::size_t i = 0; i < lowered.size(); ++i) {
      results.push_back(
          DefinitionToVariable(instruction.GetValueDefinition(i)));
      decls() << "  TNode<" << lowered[i]->GetGeneratedTNodeTypeName() << "> "
              << results.back() << ";\n";
    }
  }

  std::vector<std::string> label_names;
  std::vector<std::vector<std::string>> var_names;
  const LabelDeclarationVector& labels = instruction.macro->signature().labels;
  DCHECK_EQ(labels.size(), instruction.label_blocks.size());
  for (size_t i = 0; i < labels.size(); ++i) {
    TypeVector label_parameters = labels[i].types;
    label_names.push_back(FreshLabelName());
    var_names.push_back({});
    for (size_t j = 0; j < label_parameters.size(); ++j) {
      var_names[i].push_back(FreshNodeName());
      const auto def = instruction.GetLabelValueDefinition(i, j);
      SetDefinitionVariable(def, var_names[i].back() + ".value()");
      decls() << "  compiler::TypedCodeAssemblerVariable<"
              << label_parameters[j]->GetGeneratedTNodeTypeName() << "> "
              << var_names[i][j] << "(&ca_);\n";
    }
    out() << "    compiler::CodeAssemblerLabel " << label_names[i]
          << "(&ca_);\n";
  }

  std::string catch_name =
      PreCallableExceptionPreparation(instruction.catch_block);
  out() << "    ";
  if (results.size() == 1) {
    out() << results[0] << " = ";
  } else if (results.size() > 1) {
    out() << "std::tie(";
    PrintCommaSeparatedList(out(), results);
    out() << ") = ";
  }
  if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
    out() << extern_macro->external_assembler_name() << "(state_).";
  } else {
    args.insert(args.begin(), "state_");
  }
  out() << instruction.macro->ExternalName() << "(";
  PrintCommaSeparatedList(out(), args);
  bool first = args.empty();
  for (size_t i = 0; i < label_names.size(); ++i) {
    if (!first) out() << ", ";
    out() << "&" << label_names[i];
    first = false;
    for (size_t j = 0; j < var_names[i].size(); ++j) {
      out() << ", &" << var_names[i][j];
    }
  }
  if (return_type->StructSupertype()) {
    out() << ").Flatten();\n";
  } else {
    out() << ");\n";
  }

  PostCallableExceptionPreparation(catch_name, return_type,
                                   instruction.catch_block, &pre_call_stack,
                                   instruction.GetExceptionObjectDefinition());

  if (instruction.return_continuation) {
    out() << "    ca_.Goto(&" << BlockName(*instruction.return_continuation);
    DCHECK_EQ(stack->Size() + results.size(),
              (*instruction.return_continuation)->InputDefinitions().Size());

    const auto& input_definitions =
        (*instruction.return_continuation)->InputDefinitions();
    for (BottomOffset i = {0}; i < input_definitions.AboveTop(); ++i) {
      if (input_definitions.Peek(i).IsPhiFromBlock(
              *instruction.return_continuation)) {
        out() << ", "
              << (i < stack->AboveTop() ? stack->Peek(i) : results[i.offset]);
      }
    }
    out() << ");\n";
  }
  for (size_t l = 0; l < label_names.size(); ++l) {
    out() << "    if (" << label_names[l] << ".is_used()) {\n";
    out() << "      ca_.Bind(&" << label_names[l] << ");\n";
    out() << "      ca_.Goto(&" << BlockName(instruction.label_blocks[l]);
    DCHECK_EQ(stack->Size() + var_names[l].size(),
              instruction.label_blocks[l]->InputDefinitions().Size());

    const auto& label_definitions =
        instruction.label_blocks[l]->InputDefinitions();

    BottomOffset i = {0};
    for (; i < stack->AboveTop(); ++i) {
      if (label_definitions.Peek(i).IsPhiFromBlock(
              instruction.label_blocks[l])) {
        out() << ", " << stack->Peek(i);
      }
    }
    for (std::size_t k = 0; k < var_names[l].size(); ++k, ++i) {
      if (label_definitions.Peek(i).IsPhiFromBlock(
              instruction.label_blocks[l])) {
        out() << ", " << var_names[l][k] << ".value()";
      }
    }
    out() << ");\n";
    out() << "    }\n";
  }
}
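
// Sketch of the label plumbing above for a hypothetical macro with one label
// taking a Smi, lowered to label "label4" and variable "tmp3", and exiting to
// Torque block "block9":
//
//   compiler::TypedCodeAssemblerVariable<Smi> tmp3(&ca_);  // via decls()
//   compiler::CodeAssemblerLabel label4(&ca_);
//   ... the macro call, receiving &label4 and &tmp3 ...
//   if (label4.is_used()) {
//     ca_.Bind(&label4);
//     ca_.Goto(&block9, tmp3.value());
//   }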

void CSAGenerator::EmitInstruction(const MakeLazyNodeInstruction& instruction,
                                   Stack<std::string>* stack) {
  TypeVector parameter_types =
      instruction.macro->signature().parameter_types.types;
  std::vector<std::string> args = ProcessArgumentsCommon(
      parameter_types, instruction.constexpr_arguments, stack);

  std::string result_name =
      DefinitionToVariable(instruction.GetValueDefinition());

  stack->Push(result_name);

  decls() << "  " << instruction.result_type->GetGeneratedTypeName() << " "
          << result_name << ";\n";

  // We assume here that the CodeAssemblerState will outlive any usage of
  // the generated std::function that binds it. Likewise, copies of TNode values
  // are only valid during generation of the current builtin.
  out() << "    " << result_name << " = [=] () { return ";
  bool first = true;
  if (const ExternMacro* extern_macro =
          ExternMacro::DynamicCast(instruction.macro)) {
    out() << extern_macro->external_assembler_name() << "(state_)."
          << extern_macro->ExternalName() << "(";
  } else {
    out() << instruction.macro->ExternalName() << "(state_";
    first = false;
  }
  if (!args.empty()) {
    if (!first) out() << ", ";
    PrintCommaSeparatedList(out(), args);
  }
  out() << "); };\n";
}
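
// Example of the emitted lazy value for a hypothetical Torque macro lowered
// to "SomeMacro_7", with one Smi argument "tmp0" and result "tmp1":
//
//   tmp1 = [=] () { return SomeMacro_7(state_, TNode<Smi>{tmp0}); };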

void CSAGenerator::EmitInstruction(const CallBuiltinInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::vector<std::string> arguments = stack->PopMany(instruction.argc);
  std::vector<const Type*> result_types =
      LowerType(instruction.builtin->signature().return_type);
  if (instruction.is_tailcall) {
    out() << "    CodeStubAssembler(state_).TailCallBuiltin(Builtin::k"
          << instruction.builtin->ExternalName();
    if (!instruction.builtin->signature().HasContextParameter()) {
      // Add dummy context parameter to satisfy the TailCallBuiltin signature.
      out() << ", TNode<Object>()";
    }
    for (const std::string& argument : arguments) {
      out() << ", " << argument;
    }
    out() << ");\n";
  } else {
    std::vector<std::string> result_names(result_types.size());
    for (size_t i = 0; i < result_types.size(); ++i) {
      result_names[i] = DefinitionToVariable(instruction.GetValueDefinition(i));
      decls() << "  TNode<" << result_types[i]->GetGeneratedTNodeTypeName()
              << "> " << result_names[i] << ";\n";
    }

    std::string lhs_name;
    std::string lhs_type;
    switch (result_types.size()) {
      case 1:
        lhs_name = result_names[0];
        lhs_type = result_types[0]->GetGeneratedTNodeTypeName();
        break;
      case 2:
        // If a builtin returns two values, the return type is represented as a
        // TNode containing a pair. We need a temporary place to store that
        // result so we can unpack it into separate TNodes.
        lhs_name = result_names[0] + "_and_" + result_names[1];
        lhs_type = "PairT<" + result_types[0]->GetGeneratedTNodeTypeName() +
                   ", " + result_types[1]->GetGeneratedTNodeTypeName() + ">";
        decls() << "  TNode<" << lhs_type << "> " << lhs_name << ";\n";
        break;
      default:
        ReportError(
            "Torque can only call builtins that return one or two values, not ",
            result_types.size());
    }

    std::string catch_name =
        PreCallableExceptionPreparation(instruction.catch_block);
    Stack<std::string> pre_call_stack = *stack;

    std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
    for (const std::string& name : result_names) {
      stack->Push(name);
    }
    out() << "    " << lhs_name << " = ";
    out() << "ca_.CallStub<" << lhs_type
          << ">(Builtins::CallableFor(ca_.isolate(), Builtin::k"
          << instruction.builtin->ExternalName() << ")";
    if (!instruction.builtin->signature().HasContextParameter()) {
      // Add dummy context parameter to satisfy the CallBuiltin signature.
      out() << ", TNode<Object>()";
    }
    for (const std::string& argument : arguments) {
      out() << ", " << argument;
    }
    out() << ");\n";

    if (result_types.size() > 1) {
      for (size_t i = 0; i < result_types.size(); ++i) {
        out() << "    " << result_names[i] << " = ca_.Projection<" << i << ">("
              << lhs_name << ");\n";
      }
    }

    PostCallableExceptionPreparation(
        catch_name,
        result_types.size() == 0 ? TypeOracle::GetVoidType() : result_types[0],
        instruction.catch_block, &pre_call_stack,
        instruction.GetExceptionObjectDefinition());
  }
}
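
// Two-result example with hypothetical names: a builtin "FooBar" returning
// (Object, Smi) into "tmp1" and "tmp2", whose only argument is the context
// "tmp0", comes out roughly as
//
//   tmp1_and_tmp2 = ca_.CallStub<PairT<Object, Smi>>(
//       Builtins::CallableFor(ca_.isolate(), Builtin::kFooBar), tmp0);
//   tmp1 = ca_.Projection<0>(tmp1_and_tmp2);
//   tmp2 = ca_.Projection<1>(tmp1_and_tmp2);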

void CSAGenerator::EmitInstruction(
    const CallBuiltinPointerInstruction& instruction,
    Stack<std::string>* stack) {
  std::vector<std::string> arguments = stack->PopMany(instruction.argc);
  std::string function = stack->Pop();
  std::vector<const Type*> result_types =
      LowerType(instruction.type->return_type());
  if (result_types.size() != 1) {
    ReportError("builtins must have exactly one result");
  }
  if (instruction.is_tailcall) {
    ReportError("tail-calls to builtin pointers are not supported");
  }

  DCHECK_EQ(1, instruction.GetValueDefinitionCount());
  stack->Push(DefinitionToVariable(instruction.GetValueDefinition(0)));
  std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
  decls() << "  TNode<" << generated_type << "> " << stack->Top() << ";\n";
  out() << stack->Top() << " = ";
  if (generated_type != "Object") out() << "TORQUE_CAST(";
  out() << "CodeStubAssembler(state_).CallBuiltinPointer(Builtins::"
           "CallableFor(ca_.isolate(),"
           "ExampleBuiltinForTorqueFunctionPointerType("
        << instruction.type->function_pointer_type_id() << ")).descriptor(), "
        << function;
  if (!instruction.type->HasContextParameter()) {
    // Add dummy context parameter to satisfy the CallBuiltinPointer signature.
    out() << ", TNode<Object>()";
  }
  for (const std::string& argument : arguments) {
    out() << ", " << argument;
  }
  out() << ")";
  if (generated_type != "Object") out() << ")";
  out() << ";\n";
}

std::string CSAGenerator::PreCallableExceptionPreparation(
    base::Optional<Block*> catch_block) {
  std::string catch_name;
  if (catch_block) {
    catch_name = FreshCatchName();
    out() << "    compiler::CodeAssemblerExceptionHandlerLabel " << catch_name
          << "__label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
    out() << "    { compiler::ScopedExceptionHandler s(&ca_, &" << catch_name
          << "__label);\n";
  }
  return catch_name;
}
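
// Example prologue for a hypothetical catch name "catch2". Note that only the
// opening brace of the handler scope is emitted here; the matching "}" comes
// from PostCallableExceptionPreparation below, after the guarded call:
//
//   compiler::CodeAssemblerExceptionHandlerLabel catch2__label(
//       &ca_, compiler::CodeAssemblerLabel::kDeferred);
//   { compiler::ScopedExceptionHandler s(&ca_, &catch2__label);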

void CSAGenerator::PostCallableExceptionPreparation(
    const std::string& catch_name, const Type* return_type,
    base::Optional<Block*> catch_block, Stack<std::string>* stack,
    const base::Optional<DefinitionLocation>& exception_object_definition) {
  if (catch_block) {
    DCHECK(exception_object_definition);
    std::string block_name = BlockName(*catch_block);
    out() << "    }\n";
    out() << "    if (" << catch_name << "__label.is_used()) {\n";
    out() << "      compiler::CodeAssemblerLabel " << catch_name
          << "_skip(&ca_);\n";
    if (!return_type->IsNever()) {
      out() << "      ca_.Goto(&" << catch_name << "_skip);\n";
    }
    decls() << "  TNode<Object> "
            << DefinitionToVariable(*exception_object_definition) << ";\n";
    out() << "      ca_.Bind(&" << catch_name << "__label, &"
          << DefinitionToVariable(*exception_object_definition) << ");\n";
    out() << "      ca_.Goto(&" << block_name;

    DCHECK_EQ(stack->Size() + 1, (*catch_block)->InputDefinitions().Size());
    const auto& input_definitions = (*catch_block)->InputDefinitions();
    for (BottomOffset i = {0}; i < input_definitions.AboveTop(); ++i) {
      if (input_definitions.Peek(i).IsPhiFromBlock(*catch_block)) {
        if (i < stack->AboveTop()) {
          out() << ", " << stack->Peek(i);
        } else {
          DCHECK_EQ(i, stack->AboveTop());
          out() << ", " << DefinitionToVariable(*exception_object_definition);
        }
      }
    }
    out() << ");\n";

    if (!return_type->IsNever()) {
      out() << "      ca_.Bind(&" << catch_name << "_skip);\n";
    }
    out() << "    }\n";
  }
}

void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::vector<std::string> arguments = stack->PopMany(instruction.argc);
  const Type* return_type =
      instruction.runtime_function->signature().return_type;
  std::vector<const Type*> result_types;
  if (return_type != TypeOracle::GetNeverType()) {
    result_types = LowerType(return_type);
  }
  if (result_types.size() > 1) {
    ReportError("runtime function must have at most one result");
  }
  if (instruction.is_tailcall) {
    out() << "    CodeStubAssembler(state_).TailCallRuntime(Runtime::k"
          << instruction.runtime_function->ExternalName() << ", ";
    PrintCommaSeparatedList(out(), arguments);
    out() << ");\n";
  } else {
    std::string result_name;
    if (result_types.size() == 1) {
      result_name = DefinitionToVariable(instruction.GetValueDefinition(0));
      decls() << "  TNode<" << result_types[0]->GetGeneratedTNodeTypeName()
              << "> " << result_name << ";\n";
    }
    std::string catch_name =
        PreCallableExceptionPreparation(instruction.catch_block);
    Stack<std::string> pre_call_stack = *stack;
    if (result_types.size() == 1) {
      std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
      stack->Push(result_name);
      out() << "    " << result_name << " = ";
      if (generated_type != "Object") out() << "TORQUE_CAST(";
      out() << "CodeStubAssembler(state_).CallRuntime(Runtime::k"
            << instruction.runtime_function->ExternalName() << ", ";
      PrintCommaSeparatedList(out(), arguments);
      out() << ")";
      if (generated_type != "Object") out() << ")";
      out() << ";\n";
    } else {
      DCHECK_EQ(0, result_types.size());
      out() << "    CodeStubAssembler(state_).CallRuntime(Runtime::k"
            << instruction.runtime_function->ExternalName() << ", ";
      PrintCommaSeparatedList(out(), arguments);
      out() << ");\n";
      if (return_type == TypeOracle::GetNeverType()) {
        out() << "    CodeStubAssembler(state_).Unreachable();\n";
      } else {
        DCHECK(return_type == TypeOracle::GetVoidType());
      }
    }
    PostCallableExceptionPreparation(
        catch_name, return_type, instruction.catch_block, &pre_call_stack,
        instruction.GetExceptionObjectDefinition());
  }
}
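
// Example of the non-tail-call case with a single narrowed result, using
// hypothetical names ("tmp1" of type Smi, context argument "tmp0", runtime
// function "FooBar"):
//
//   tmp1 = TORQUE_CAST(CodeStubAssembler(state_).CallRuntime(
//       Runtime::kFooBar, tmp0));
//
// TORQUE_CAST appears because CallRuntime returns a plain Object while the
// lowered result type here is narrower.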

void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
                                   Stack<std::string>* stack) {
  out() << "    ca_.Branch(" << stack->Pop() << ", &"
        << BlockName(instruction.if_true) << ", std::vector<compiler::Node*>{";

  const auto& true_definitions = instruction.if_true->InputDefinitions();
  DCHECK_EQ(stack->Size(), true_definitions.Size());
  bool first = true;
  for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
    if (true_definitions.Peek(i).IsPhiFromBlock(instruction.if_true)) {
      if (!first) out() << ", ";
      out() << stack->Peek(i);
      first = false;
    }
  }

  out() << "}, &" << BlockName(instruction.if_false)
        << ", std::vector<compiler::Node*>{";

  const auto& false_definitions = instruction.if_false->InputDefinitions();
  DCHECK_EQ(stack->Size(), false_definitions.Size());
  first = true;
  for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
    if (false_definitions.Peek(i).IsPhiFromBlock(instruction.if_false)) {
      if (!first) out() << ", ";
      out() << stack->Peek(i);
      first = false;
    }
  }

  out() << "});\n";
}
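
// Example emitted branch for a hypothetical condition "tmp0" with one phi
// value "tmp1" flowing only into the true block:
//
//   ca_.Branch(tmp0, &block1, std::vector<compiler::Node*>{tmp1}, &block2,
//              std::vector<compiler::Node*>{});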

void CSAGenerator::EmitInstruction(
    const ConstexprBranchInstruction& instruction, Stack<std::string>* stack) {
  out() << "    if ((" << instruction.condition << ")) {\n";
  out() << "      ca_.Goto(&" << BlockName(instruction.if_true);

  const auto& true_definitions = instruction.if_true->InputDefinitions();
  DCHECK_EQ(stack->Size(), true_definitions.Size());
  for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
    if (true_definitions.Peek(i).IsPhiFromBlock(instruction.if_true)) {
      out() << ", " << stack->Peek(i);
    }
  }

  out() << ");\n";
  out() << "    } else {\n";
  out() << "      ca_.Goto(&" << BlockName(instruction.if_false);

  const auto& false_definitions = instruction.if_false->InputDefinitions();
  DCHECK_EQ(stack->Size(), false_definitions.Size());
  for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
    if (false_definitions.Peek(i).IsPhiFromBlock(instruction.if_false)) {
      out() << ", " << stack->Peek(i);
    }
  }

  out() << ");\n";
  out() << "    }\n";
}

void CSAGenerator::EmitInstruction(const GotoInstruction& instruction,
                                   Stack<std::string>* stack) {
  out() << "    ca_.Goto(&" << BlockName(instruction.destination);
  const auto& destination_definitions =
      instruction.destination->InputDefinitions();
  DCHECK_EQ(stack->Size(), destination_definitions.Size());
  for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
    if (destination_definitions.Peek(i).IsPhiFromBlock(
            instruction.destination)) {
      out() << ", " << stack->Peek(i);
    }
  }
  out() << ");\n";
}

void CSAGenerator::EmitInstruction(const GotoExternalInstruction& instruction,
                                   Stack<std::string>* stack) {
  for (auto it = instruction.variable_names.rbegin();
       it != instruction.variable_names.rend(); ++it) {
    out() << "    *" << *it << " = " << stack->Pop() << ";\n";
  }
  out() << "    ca_.Goto(" << instruction.destination << ");\n";
}

void CSAGenerator::EmitInstruction(const ReturnInstruction& instruction,
                                   Stack<std::string>* stack) {
  if (*linkage_ == Builtin::kVarArgsJavaScript) {
    out() << "    " << ARGUMENTS_VARIABLE_STRING << ".PopAndReturn(";
  } else {
    out() << "    CodeStubAssembler(state_).Return(";
  }
  std::vector<std::string> values = stack->PopMany(instruction.count);
  PrintCommaSeparatedList(out(), values);
  out() << ");\n";
}

void CSAGenerator::EmitInstruction(
    const PrintConstantStringInstruction& instruction,
    Stack<std::string>* stack) {
  out() << "    CodeStubAssembler(state_).Print("
        << StringLiteralQuote(instruction.message) << ");\n";
}

void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
                                   Stack<std::string>* stack) {
  switch (instruction.kind) {
    case AbortInstruction::Kind::kUnreachable:
      DCHECK(instruction.message.empty());
      out() << "    CodeStubAssembler(state_).Unreachable();\n";
      break;
    case AbortInstruction::Kind::kDebugBreak:
      DCHECK(instruction.message.empty());
      out() << "    CodeStubAssembler(state_).DebugBreak();\n";
      break;
    case AbortInstruction::Kind::kAssertionFailure: {
      std::string file = StringLiteralQuote(
          SourceFileMap::PathFromV8Root(instruction.pos.source));
      out() << "    {\n";
      out() << "      auto pos_stack = ca_.GetMacroSourcePositionStack();\n";
      out() << "      pos_stack.push_back({" << file << ", "
            << instruction.pos.start.line + 1 << "});\n";
      out() << "      CodeStubAssembler(state_).FailAssert("
            << StringLiteralQuote(instruction.message) << ", pos_stack);\n";
      out() << "    }\n";
      break;
    }
  }
}

void CSAGenerator::EmitInstruction(const UnsafeCastInstruction& instruction,
                                   Stack<std::string>* stack) {
  const std::string str =
      "ca_.UncheckedCast<" +
      instruction.destination_type->GetGeneratedTNodeTypeName() + ">(" +
      stack->Top() + ")";
  stack->Poke(stack->AboveTop() - 1, str);
  SetDefinitionVariable(instruction.GetValueDefinition(), str);
}

void CSAGenerator::EmitInstruction(const LoadReferenceInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string result_name =
      DefinitionToVariable(instruction.GetValueDefinition());

  std::string offset = stack->Pop();
  std::string object = stack->Pop();
  stack->Push(result_name);

  decls() << "  " << instruction.type->GetGeneratedTypeName() << " "
          << result_name << ";\n";
  out() << "    " << result_name
        << " = CodeStubAssembler(state_).LoadReference<"
        << instruction.type->GetGeneratedTNodeTypeName()
        << ">(CodeStubAssembler::Reference{" << object << ", " << offset
        << "});\n";
}

void CSAGenerator::EmitInstruction(const StoreReferenceInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string value = stack->Pop();
  std::string offset = stack->Pop();
  std::string object = stack->Pop();

  out() << "    CodeStubAssembler(state_).StoreReference<"
        << instruction.type->GetGeneratedTNodeTypeName()
        << ">(CodeStubAssembler::Reference{" << object << ", " << offset
        << "}, " << value << ");\n";
}

namespace {
std::string GetBitFieldSpecialization(const Type* container,
                                      const BitField& field) {
  auto smi_tagged_type =
      Type::MatchUnaryGeneric(container, TypeOracle::GetSmiTaggedGeneric());
  std::string container_type =
      smi_tagged_type ? "uintptr_t" : container->GetConstexprGeneratedTypeName();
  int offset = smi_tagged_type
                   ? field.offset + TargetArchitecture::SmiTagAndShiftSize()
                   : field.offset;
  std::stringstream stream;
  stream << "base::BitField<"
         << field.name_and_type.type->GetConstexprGeneratedTypeName() << ", "
         << offset << ", " << field.num_bits << ", " << container_type << ">";
  return stream.str();
}
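
// For a hypothetical one-bit bool field at bit offset 3 of a SmiTagged
// bitfield struct, on a configuration where SmiTagAndShiftSize() is 32, this
// produces the specialization
//
//   base::BitField<bool, 35, 1, uintptr_t>
//
// i.e. the field is addressed within the untagged word, above the Smi shift.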
}  // namespace

void CSAGenerator::EmitInstruction(const LoadBitFieldInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string result_name =
      DefinitionToVariable(instruction.GetValueDefinition());

  std::string bit_field_struct = stack->Pop();
  stack->Push(result_name);

  const Type* struct_type = instruction.bit_field_struct_type;
  const Type* field_type = instruction.bit_field.name_and_type.type;
  auto smi_tagged_type =
      Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
  bool struct_is_pointer_size =
      IsPointerSizeIntegralType(struct_type) || smi_tagged_type;
  DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type));
  bool field_is_pointer_size = IsPointerSizeIntegralType(field_type);
  DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type));
  std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T";
  std::string decoder =
      struct_is_pointer_size
          ? (field_is_pointer_size ? "DecodeWord" : "DecodeWord32FromWord")
          : (field_is_pointer_size ? "DecodeWordFromWord32" : "DecodeWord32");

  decls() << "  " << field_type->GetGeneratedTypeName() << " " << result_name
          << ";\n";

  if (smi_tagged_type) {
    // If the container is a SMI, then UncheckedCast is insufficient and we must
    // use a bit cast.
    bit_field_struct =
        "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")";
  }

  out() << "    " << result_name << " = ca_.UncheckedCast<"
        << field_type->GetGeneratedTNodeTypeName()
        << ">(CodeStubAssembler(state_)." << decoder << "<"
        << GetBitFieldSpecialization(struct_type, instruction.bit_field)
        << ">(ca_.UncheckedCast<" << struct_word_type << ">("
        << bit_field_struct << ")));\n";
}
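
// Example load with hypothetical names: a 32-bit bitfield struct "tmp0" with
// a one-bit bool field at bit 3, loaded into "tmp1" (struct and field both
// 32-bit, so the decoder is DecodeWord32):
//
//   tmp1 = ca_.UncheckedCast<BoolT>(CodeStubAssembler(state_)
//       .DecodeWord32<base::BitField<bool, 3, 1, uint32_t>>(
//           ca_.UncheckedCast<Word32T>(tmp0)));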

void CSAGenerator::EmitInstruction(const StoreBitFieldInstruction& instruction,
                                   Stack<std::string>* stack) {
  std::string result_name =
      DefinitionToVariable(instruction.GetValueDefinition());

  std::string value = stack->Pop();
  std::string bit_field_struct = stack->Pop();
  stack->Push(result_name);

  const Type* struct_type = instruction.bit_field_struct_type;
  const Type* field_type = instruction.bit_field.name_and_type.type;
  auto smi_tagged_type =
      Type::MatchUnaryGeneric(struct_type, TypeOracle::GetSmiTaggedGeneric());
  bool struct_is_pointer_size =
      IsPointerSizeIntegralType(struct_type) || smi_tagged_type;
  DCHECK_IMPLIES(!struct_is_pointer_size, Is32BitIntegralType(struct_type));
  bool field_is_pointer_size = IsPointerSizeIntegralType(field_type);
  DCHECK_IMPLIES(!field_is_pointer_size, Is32BitIntegralType(field_type));
  std::string struct_word_type = struct_is_pointer_size ? "WordT" : "Word32T";
  std::string field_word_type = field_is_pointer_size ? "UintPtrT" : "Uint32T";
  std::string encoder =
      struct_is_pointer_size
          ? (field_is_pointer_size ? "UpdateWord" : "UpdateWord32InWord")
          : (field_is_pointer_size ? "UpdateWordInWord32" : "UpdateWord32");

  decls() << "  " << struct_type->GetGeneratedTypeName() << " " << result_name
          << ";\n";

  if (smi_tagged_type) {
    // If the container is a SMI, then UncheckedCast is insufficient and we must
    // use a bit cast.
    bit_field_struct =
        "ca_.BitcastTaggedToWordForTagAndSmiBits(" + bit_field_struct + ")";
  }

  std::string result_expression =
      "CodeStubAssembler(state_)." + encoder + "<" +
      GetBitFieldSpecialization(struct_type, instruction.bit_field) +
      ">(ca_.UncheckedCast<" + struct_word_type + ">(" + bit_field_struct +
      "), ca_.UncheckedCast<" + field_word_type + ">(" + value + ")" +
      (instruction.starts_as_zero ? ", true" : "") + ")";

  if (smi_tagged_type) {
    result_expression =
        "ca_.BitcastWordToTaggedSigned(" + result_expression + ")";
  }

  out() << "    " << result_name << " = ca_.UncheckedCast<"
        << struct_type->GetGeneratedTNodeTypeName() << ">(" << result_expression
        << ");\n";
}

// static
void CSAGenerator::EmitCSAValue(VisitResult result,
                                const Stack<std::string>& values,
                                std::ostream& out) {
  if (!result.IsOnStack()) {
    out << result.constexpr_value();
  } else if (auto struct_type = result.type()->StructSupertype()) {
    out << (*struct_type)->GetGeneratedTypeName() << "{";
    bool first = true;
    for (auto& field : (*struct_type)->fields()) {
      if (!first) {
        out << ", ";
      }
      first = false;
      EmitCSAValue(ProjectStructField(result, field.name_and_type.name), values,
                   out);
    }
    out << "}";
  } else {
    DCHECK_EQ(1, result.stack_range().Size());
    out << result.type()->GetGeneratedTypeName() << "{"
        << values.Peek(result.stack_range().begin()) << "}";
  }
}
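
// Example renderings with hypothetical names: a single Smi slot "tmp0" prints
// as
//
//   TNode<Smi>{tmp0}
//
// while a two-field struct spanning "tmp1" and "tmp2" prints recursively,
// e.g. TorqueStructFoo{TNode<Smi>{tmp1}, TNode<Object>{tmp2}}.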

}  // namespace torque
}  // namespace internal
}  // namespace v8