// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/torque/implementation-visitor.h"

#include <algorithm>
#include <iomanip>
#include <string>

#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/numbers/integer-literal-inl.h"
#include "src/torque/cc-generator.h"
#include "src/torque/cfg.h"
#include "src/torque/constants.h"
#include "src/torque/cpp-builder.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/kythe-data.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/source-positions.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"

namespace v8 {
namespace internal {
namespace torque {

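// Counter used to hand out a unique index to every new local binding that
// the visitor creates.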
uint64_t next_unique_binding_index = 0;

// Sadly, 'using std::string_literals::operator""s;' is bugged in MSVC (see
// https://developercommunity.visualstudio.com/t/Incorrect-warning-when-using-standard-st/673948).
// TODO(nicohartmann@): Change to 'using std::string_literals::operator""s;'
// once this is fixed.
using namespace std::string_literals;  // NOLINT(build/namespaces)

namespace {
const char* BuiltinIncludesMarker = "// __BUILTIN_INCLUDES_MARKER__\n";
}  // namespace

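// Dispatch on the dynamic kind of the expression node: the ENUM_ITEM macro
// expands AST_EXPRESSION_NODE_KIND_LIST into one case per concrete
// expression class, each forwarding to the matching Visit overload.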
VisitResult ImplementationVisitor::Visit(Expression* expr) {
  CurrentSourcePosition::Scope scope(expr->pos);
  switch (expr->kind) {
#define ENUM_ITEM(name)        \
  case AstNode::Kind::k##name: \
    return Visit(name::cast(expr));
    AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
}

const Type* ImplementationVisitor::Visit(Statement* stmt) {
  CurrentSourcePosition::Scope scope(stmt->pos);
  StackScope stack_scope(this);
  const Type* result;
  switch (stmt->kind) {
#define ENUM_ITEM(name)               \
  case AstNode::Kind::k##name:        \
    result = Visit(name::cast(stmt)); \
    break;
    AST_STATEMENT_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
    default:
      UNREACHABLE();
  }
  DCHECK_EQ(result == TypeOracle::GetNeverType(),
            assembler().CurrentBlockIsComplete());
  return result;
}

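// Emit the boilerplate at the top of every generated file: the CSA .cc
// preamble (user-configured includes plus the builtin-includes marker), the
// CSA header's include guard, and the includes of the class-definition .cc
// file for sources that actually define classes. EndGeneratedFiles() below
// closes everything opened here.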
void ImplementationVisitor::BeginGeneratedFiles() {
  std::set<SourceId> contains_class_definitions;
  for (const ClassType* type : TypeOracle::GetClasses()) {
    if (type->ShouldGenerateCppClassDefinitions()) {
      contains_class_definitions.insert(type->AttributedToFile());
    }
  }

  for (SourceId source : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(source);
    // Output beginning of CSA .cc file.
    {
      cpp::File& file = streams.csa_cc;

      for (const std::string& include_path : GlobalContext::CppIncludes()) {
        file << "#include " << StringLiteralQuote(include_path) << "\n";
      }

      file << "// Required Builtins:\n";
      file << "#include \"torque-generated/" +
                  SourceFileMap::PathFromV8RootWithoutExtension(source) +
                  "-tq-csa.h\"\n";
      // Because the set of required include files is collected while
      // generating the file, the full set is only known at the end. Insert a
      // marker here that is replaced with the list of includes at the very
      // end.
      // TODO(nicohartmann@): This is not the most beautiful way to do this;
      // replace it once the cpp file builder is available, where this can be
      // handled easily.
      file << BuiltinIncludesMarker;
      file << "\n";

      streams.csa_cc.BeginNamespace("v8", "internal");
      streams.csa_ccfile << "\n";
    }
    // Output beginning of CSA .h file.
    {
      cpp::File& file = streams.csa_header;
      std::string header_define =
          "V8_GEN_TORQUE_GENERATED_" +
          UnderlinifyPath(SourceFileMap::PathFromV8Root(source)) + "_CSA_H_";
      streams.csa_header.BeginIncludeGuard(header_define);
      file << "#include \"src/builtins/torque-csa-header-includes.h\"\n";
      file << "\n";

      streams.csa_header.BeginNamespace("v8", "internal");
      streams.csa_headerfile << "\n";
    }
    // Output beginning of class definition .cc file.
    {
      cpp::File& file = streams.class_definition_cc;
      if (contains_class_definitions.count(source) != 0) {
        file << "#include \""
             << SourceFileMap::PathFromV8RootWithoutExtension(source)
             << "-inl.h\"\n\n";
        file << "#include \"torque-generated/class-verifiers.h\"\n";
        file << "#include \"src/objects/instance-type-inl.h\"\n\n";
      }

      streams.class_definition_cc.BeginNamespace("v8", "internal");
      streams.class_definition_ccfile << "\n";
    }
  }
}

void ImplementationVisitor::EndGeneratedFiles() {
  for (SourceId file : SourceFileMap::AllSources()) {
    auto& streams = GlobalContext::GeneratedPerFile(file);

    // Output ending of CSA .cc file.
    streams.csa_cc.EndNamespace("v8", "internal");

    // Output ending of CSA .h file.
    {
      std::string header_define =
          "V8_GEN_TORQUE_GENERATED_" +
          UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_CSA_H_";

      streams.csa_header.EndNamespace("v8", "internal");
      streams.csa_headerfile << "\n";
      streams.csa_header.EndIncludeGuard(header_define);
    }

    // Output ending of class definition .cc file.
    streams.class_definition_cc.EndNamespace("v8", "internal");
  }
}

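// Emit the fixed preambles of the generated debug-macros.h/.cc pair used by
// the debug helper; EndDebugMacrosFile() below emits the matching endings.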
void ImplementationVisitor::BeginDebugMacrosFile() {
  // TODO(torque-builder): Can use builder for debug_macros_*_
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "#include \"torque-generated/debug-macros.h\"\n\n";
  source << "#include \"src/objects/swiss-name-dictionary.h\"\n";
  source << "#include \"src/objects/ordered-hash-table.h\"\n";
  source << "#include \"tools/debug_helper/debug-macro-shims.h\"\n";
  source << "#include \"include/v8-internal.h\"\n";
  source << "\n";

  source << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";

  const char* kHeaderDefine = "V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_";
  header << "#ifndef " << kHeaderDefine << "\n";
  header << "#define " << kHeaderDefine << "\n\n";
  header << "#include \"tools/debug_helper/debug-helper-internal.h\"\n";
  header << "#include \"src/numbers/integer-literal.h\"\n";
  header << "\n";

  header << "namespace v8 {\n"
         << "namespace internal {\n"
         << "namespace debug_helper_internal {\n"
         << "\n";
}

void ImplementationVisitor::EndDebugMacrosFile() {
  // TODO(torque-builder): Can use builder for debug_macros_*_
  std::ostream& source = debug_macros_cc_;
  std::ostream& header = debug_macros_h_;

  source << "}  // namespace debug_helper_internal\n"
         << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "\n";

  header << "\n}  // namespace debug_helper_internal\n"
         << "}  // namespace internal\n"
         << "}  // namespace v8\n"
         << "\n";
  header << "#endif  // V8_GEN_TORQUE_GENERATED_DEBUG_MACROS_H_\n";
}

void ImplementationVisitor::Visit(NamespaceConstant* decl) {
  Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(),
                      {}, false};

  BindingsManagersScope bindings_managers_scope;

  cpp::Function f =
      GenerateFunction(nullptr, decl->external_name(), signature, {});

  f.PrintDeclaration(csa_headerfile());

  f.PrintDefinition(csa_ccfile(), [&](std::ostream& stream) {
    stream << "  compiler::CodeAssembler ca_(state_);\n";

    DCHECK(!signature.return_type->IsVoidOrNever());

    assembler_ = CfgAssembler(Stack<const Type*>{});

    VisitResult expression_result = Visit(decl->body());
    VisitResult return_result =
        GenerateImplicitConvert(signature.return_type, expression_result);

    CSAGenerator csa_generator{assembler().Result(), stream};
    Stack<std::string> values = *csa_generator.EmitGraph(Stack<std::string>{});

    assembler_ = base::nullopt;

    stream << "  return ";
    CSAGenerator::EmitCSAValue(return_result, values, stream);
    stream << ";";
  });
}

void ImplementationVisitor::Visit(TypeAlias* alias) {
  if (alias->IsRedeclaration()) return;
  if (const ClassType* class_type = ClassType::DynamicCast(alias->type())) {
    if (class_type->IsExtern() && !class_type->nspace()->IsDefaultNamespace()) {
      Error(
          "extern classes are currently only supported in the default "
          "namespace");
    }
  }
}

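// RAII helper that tracks the set of macros currently being inlined, so
// that recursive macro expansion (which would otherwise recurse until stack
// overflow) is detected and reported as an error.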
class ImplementationVisitor::MacroInliningScope {
 public:
  MacroInliningScope(ImplementationVisitor* visitor, const Macro* macro)
      : visitor_(visitor), macro_(macro) {
    if (!visitor_->inlining_macros_.insert(macro).second) {
      // Recursive macro expansion would just keep going until stack overflow.
      // To avoid crashes, throw an error immediately.
      ReportError("Recursive macro call to ", *macro);
    }
  }
  ~MacroInliningScope() { visitor_->inlining_macros_.erase(macro_); }

 private:
  ImplementationVisitor* visitor_;
  const Macro* macro_;
};

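// Generate the body of |macro| directly into the current control-flow
// graph: bind 'this' (for methods), the parameters, and the label blocks,
// then visit the body and route all returns through the synthetic
// _macro_end block, whose stack layout carries the return value.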
VisitResult ImplementationVisitor::InlineMacro(
    Macro* macro, base::Optional<LocationReference> this_reference,
    const std::vector<VisitResult>& arguments,
    const std::vector<Block*> label_blocks) {
  MacroInliningScope macro_inlining_scope(this, macro);
  CurrentScope::Scope current_scope(macro);
  BindingsManagersScope bindings_managers_scope;
  CurrentCallable::Scope current_callable(macro);
  CurrentReturnValue::Scope current_return_value;
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
  BlockBindings<LocalLabel> label_bindings(&LabelBindingsManager::Get());
  DCHECK_EQ(macro->signature().parameter_names.size(),
            arguments.size() + (this_reference ? 1 : 0));
  DCHECK_EQ(this_reference.has_value(), macro->IsMethod());

  // Bind 'this' for methods. Methods that modify a struct-type "this" must
  // only be called if the this is in a variable, in which case the
  // LocalValue is non-const. Otherwise, the LocalValue used for the parameter
  // binding is const, and thus read-only, which will cause errors if
  // modified, e.g. when called by a struct method that sets the struct's
  // fields. This prevents using temporary struct values for anything other
  // than read operations.
  if (this_reference) {
    DCHECK(macro->IsMethod());
    parameter_bindings.Add(kThisParameterName, LocalValue{*this_reference},
                           true);
    // TODO(v8:12261): Tracking 'this'-binding for kythe led to a few weird
    // issues. Review to fully support 'this' in methods.
  }

  size_t count = 0;
  for (auto arg : arguments) {
    if (this_reference && count == signature.implicit_count) count++;
    const bool mark_as_used = signature.implicit_count > count;
    const Identifier* name = macro->parameter_names()[count++];
    Binding<LocalValue>* binding =
        parameter_bindings.Add(name,
                               LocalValue{LocationReference::Temporary(
                                   arg, "parameter " + name->value)},
                               mark_as_used);
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddBindingDefinition(binding);
    }
  }

  DCHECK_EQ(label_blocks.size(), signature.labels.size());
  for (size_t i = 0; i < signature.labels.size(); ++i) {
    const LabelDeclaration& label_info = signature.labels[i];
    Binding<LocalLabel>* binding = label_bindings.Add(
        label_info.name, LocalLabel{label_blocks[i], label_info.types});
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddBindingDefinition(binding);
    }
  }

  Block* macro_end;
  base::Optional<Binding<LocalLabel>> macro_end_binding;
  if (can_return) {
    Stack<const Type*> stack = assembler().CurrentStack();
    std::vector<const Type*> lowered_return_types = LowerType(return_type);
    stack.PushMany(lowered_return_types);
    if (!return_type->IsConstexpr()) {
      SetReturnValue(VisitResult(return_type,
                                 stack.TopRange(lowered_return_types.size())));
    }
    // The stack copy used to initialize the _macro_end block is only used
    // as a template for the actual gotos generated by return statements. It
    // doesn't correspond to any real return values, and thus shouldn't contain
    // top types, because these would pollute actual return value types that get
    // unioned with them for return statements, erroneously forcing them to top.
    for (auto i = stack.begin(); i != stack.end(); ++i) {
      if ((*i)->IsTopType()) {
        *i = TopType::cast(*i)->source_type();
      }
    }
    macro_end = assembler().NewBlock(std::move(stack));
    macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
                              LocalLabel{macro_end, {return_type}});
  } else {
    SetReturnValue(VisitResult::NeverResult());
  }

  const Type* result = Visit(*macro->body());

  if (result->IsNever()) {
    if (!return_type->IsNever() && !macro->HasReturns()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " that never returns must have return type never";
      ReportError(s.str());
    }
  } else {
    if (return_type->IsNever()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " has implicit return at end of its declaration but return type "
           "never";
      ReportError(s.str());
    } else if (!macro->signature().return_type->IsVoid()) {
      std::stringstream s;
      s << "macro " << macro->ReadableName()
        << " expects to return a value but doesn't on all paths";
      ReportError(s.str());
    }
  }
  if (!result->IsNever()) {
    assembler().Goto(macro_end);
  }

  if (macro->HasReturns() || !result->IsNever()) {
    assembler().Bind(macro_end);
  }

  return GetAndClearReturnValue();
}

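// Generate a standalone function for |macro| around InlineMacro: parameters
// are lowered onto the stack, labels become external goto targets, and the
// resulting control-flow graph is emitted through the generator that matches
// the current output type (CSA, plain C++, or debug C++).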
void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
  CurrentCallable::Scope current_callable(macro);
  const Signature& signature = macro->signature();
  const Type* return_type = macro->signature().return_type;
  bool can_return = return_type != TypeOracle::GetNeverType();
  bool has_return_value =
      can_return && return_type != TypeOracle::GetVoidType();

  cpp::Function f = GenerateMacroFunctionDeclaration(macro);
  f.PrintDeclaration(csa_headerfile());
  csa_headerfile() << "\n";

  cpp::File csa_cc(csa_ccfile());

  // Avoid multiple-definition errors since it is possible for multiple
  // generated -inl.inc files to all contain function definitions for the same
  // Torque macro.
  base::Optional<cpp::IncludeGuardScope> include_guard;
  if (output_type_ == OutputType::kCC) {
    include_guard.emplace(&csa_cc, "V8_INTERNAL_DEFINED_"s + macro->CCName());
  } else if (output_type_ == OutputType::kCCDebug) {
    include_guard.emplace(&csa_cc,
                          "V8_INTERNAL_DEFINED_"s + macro->CCDebugName());
  }

  f.PrintBeginDefinition(csa_ccfile());

  if (output_type_ == OutputType::kCC) {
    // For now, generated C++ is only for field offset computations. If we ever
    // generate C++ code that can allocate, then it should be handlified.
    csa_ccfile() << "  DisallowGarbageCollection no_gc;\n";
  } else if (output_type_ == OutputType::kCSA) {
    csa_ccfile() << "  compiler::CodeAssembler ca_(state_);\n";
    csa_ccfile()
        << "  compiler::CodeAssembler::SourcePositionScope pos_scope(&ca_);\n";
  }

  Stack<std::string> lowered_parameters;
  Stack<const Type*> lowered_parameter_types;

  std::vector<VisitResult> arguments;

  base::Optional<LocationReference> this_reference;
  if (Method* method = Method::DynamicCast(macro)) {
    const Type* this_type = method->aggregate_type();
    LowerParameter(this_type, ExternalParameterName(kThisParameterName),
                   &lowered_parameters);
    StackRange range = lowered_parameter_types.PushMany(LowerType(this_type));
    VisitResult this_result = VisitResult(this_type, range);
    // For classes, mark 'this' as a temporary to prevent assignment to it.
    // Note that using a VariableAccess for non-class types is technically
    // incorrect because changes to the 'this' variable do not get reflected
    // to the caller. Therefore struct methods should always be inlined and a
    // C++ version should never be generated, since it would be incorrect.
    // However, in order to be able to type- and semantics-check even unused
    // struct methods, set the this_reference to be the local variable copy of
    // the passed-in this, which allows the visitor to at least find and report
    // errors.
    this_reference =
        (this_type->IsClassType())
            ? LocationReference::Temporary(this_result, "this parameter")
            : LocationReference::VariableAccess(this_result);
  }

  for (size_t i = 0; i < macro->signature().parameter_names.size(); ++i) {
    if (this_reference && i == macro->signature().implicit_count) continue;
    const std::string& name = macro->parameter_names()[i]->value;
    std::string external_name = ExternalParameterName(name);
    const Type* type = macro->signature().types()[i];

    if (type->IsConstexpr()) {
      arguments.push_back(VisitResult(type, external_name));
    } else {
      LowerParameter(type, external_name, &lowered_parameters);
      StackRange range = lowered_parameter_types.PushMany(LowerType(type));
      arguments.push_back(VisitResult(type, range));
    }
  }

  DCHECK_EQ(lowered_parameters.Size(), lowered_parameter_types.Size());
  assembler_ = CfgAssembler(lowered_parameter_types);

  std::vector<Block*> label_blocks;
  for (const LabelDeclaration& label_info : signature.labels) {
    Stack<const Type*> label_input_stack;
    for (const Type* type : label_info.types) {
      label_input_stack.PushMany(LowerType(type));
    }
    Block* block = assembler().NewBlock(std::move(label_input_stack));
    label_blocks.push_back(block);
  }

  VisitResult return_value =
      InlineMacro(macro, this_reference, arguments, label_blocks);
  Block* end = assembler().NewBlock();
  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Goto(end);
  }

  for (size_t i = 0; i < label_blocks.size(); ++i) {
    Block* label_block = label_blocks[i];
    const LabelDeclaration& label_info = signature.labels[i];
    assembler().Bind(label_block);
    std::vector<std::string> label_parameter_variables;
    for (size_t j = 0; j < label_info.types.size(); ++j) {
      LowerLabelParameter(label_info.types[j],
                          ExternalLabelParameterName(label_info.name->value, j),
                          &label_parameter_variables);
    }
    assembler().Emit(GotoExternalInstruction{
        ExternalLabelName(label_info.name->value), label_parameter_variables});
  }

  if (return_type != TypeOracle::GetNeverType()) {
    assembler().Bind(end);
  }

  base::Optional<Stack<std::string>> values;
  if (output_type_ == OutputType::kCC) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile()};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else if (output_type_ == OutputType::kCCDebug) {
    CCGenerator cc_generator{assembler().Result(), csa_ccfile(), true};
    values = cc_generator.EmitGraph(lowered_parameters);
  } else {
    CSAGenerator csa_generator{assembler().Result(), csa_ccfile()};
    values = csa_generator.EmitGraph(lowered_parameters);
  }

  assembler_ = base::nullopt;

  if (has_return_value) {
    csa_ccfile() << "  return ";
    if (output_type_ == OutputType::kCCDebug) {
      csa_ccfile() << "{d::MemoryAccessResult::kOk, ";
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
      csa_ccfile() << "}";
    } else if (output_type_ == OutputType::kCC) {
      CCGenerator::EmitCCValue(return_value, *values, csa_ccfile());
    } else {
      CSAGenerator::EmitCSAValue(return_value, *values, csa_ccfile());
    }
    csa_ccfile() << ";\n";
  }
  f.PrintEndDefinition(csa_ccfile());

  include_guard.reset();
}

void ImplementationVisitor::Visit(TorqueMacro* macro) {
  VisitMacroCommon(macro);
}

void ImplementationVisitor::Visit(Method* method) {
  DCHECK(!method->IsExternal());
  VisitMacroCommon(method);
}

namespace {

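// Push the i-th builtin parameter under a generated external name
// ("parameter<i>") onto the parameter stacks and register a binding for it,
// so the builtin body can refer to it by its Torque name.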
std::string AddParameter(size_t i, Builtin* builtin,
                         Stack<std::string>* parameters,
                         Stack<const Type*>* parameter_types,
                         BlockBindings<LocalValue>* parameter_bindings,
                         bool mark_as_used) {
  const Identifier* name = builtin->signature().parameter_names[i];
  const Type* type = builtin->signature().types()[i];
  std::string external_name = "parameter" + std::to_string(i);
  parameters->Push(external_name);
  StackRange range = parameter_types->PushMany(LowerType(type));
  Binding<LocalValue>* binding = parameter_bindings->Add(
      name,
      LocalValue{LocationReference::Temporary(VisitResult(type, range),
                                              "parameter " + name->value)},
      mark_as_used);
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddBindingDefinition(binding);
  }
  return external_name;
}

}  // namespace

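// Emit the TF_BUILTIN definition for a Torque-defined builtin. Builtins
// with JavaScript linkage get their implicit parameters (context, receiver,
// target, newTarget) and, for varargs builtins, a CodeStubArguments view of
// the frame; stub builtins read every parameter from the descriptor.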
void ImplementationVisitor::Visit(Builtin* builtin) {
  if (builtin->IsExternal()) return;
  CurrentScope::Scope current_scope(builtin);
  CurrentCallable::Scope current_callable(builtin);
  CurrentReturnValue::Scope current_return_value;

  const std::string& name = builtin->ExternalName();
  const Signature& signature = builtin->signature();
  csa_ccfile() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
               << "  compiler::CodeAssemblerState* state_ = state();\n"
               << "  compiler::CodeAssembler ca_(state());\n";

  Stack<const Type*> parameter_types;
  Stack<std::string> parameters;

  BindingsManagersScope bindings_managers_scope;

  BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());

  if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
    if (builtin->IsVarArgsJavaScript()) {
      DCHECK(signature.parameter_types.var_args);
      if (signature.ExplicitCount() > 0) {
        Error("Cannot mix explicit parameters with varargs.")
            .Position(signature.parameter_names[signature.implicit_count]->pos);
      }

      csa_ccfile() << "  TNode<Word32T> argc = UncheckedParameter<Word32T>("
                   << "Descriptor::kJSActualArgumentsCount);\n";
      csa_ccfile() << "  TNode<IntPtrT> "
                      "arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
                      "Int32T>(argc)));\n";
      csa_ccfile() << "  TNode<RawPtrT> arguments_frame = "
                      "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
      csa_ccfile()
          << "  TorqueStructArguments "
             "torque_arguments(GetFrameArguments(arguments_frame, "
             "arguments_length, FrameArgumentsArgcType::kCountIncludesReceiver"
          << "));\n";
      csa_ccfile()
          << "  CodeStubArguments arguments(this, torque_arguments);\n";

      parameters.Push("torque_arguments.frame");
      parameters.Push("torque_arguments.base");
      parameters.Push("torque_arguments.length");
      parameters.Push("torque_arguments.actual_count");
      const Type* arguments_type = TypeOracle::GetArgumentsType();
      StackRange range = parameter_types.PushMany(LowerType(arguments_type));
      parameter_bindings.Add(*signature.arguments_variable,
                             LocalValue{LocationReference::Temporary(
                                 VisitResult(arguments_type, range),
                                 "parameter " + *signature.arguments_variable)},
                             true);
    }

    for (size_t i = 0; i < signature.implicit_count; ++i) {
      const std::string& param_name = signature.parameter_names[i]->value;
      SourcePosition param_pos = signature.parameter_names[i]->pos;
      std::string generated_name = AddParameter(
          i, builtin, &parameters, &parameter_types, &parameter_bindings, true);
      const Type* actual_type = signature.parameter_types.types[i];
      std::vector<const Type*> expected_types;
      if (param_name == "context") {
        csa_ccfile() << "  TNode<NativeContext> " << generated_name
                     << " = UncheckedParameter<NativeContext>("
                     << "Descriptor::kContext);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetNativeContextType(),
                          TypeOracle::GetContextType()};
      } else if (param_name == "receiver") {
        csa_ccfile()
            << "  TNode<Object> " << generated_name << " = "
            << (builtin->IsVarArgsJavaScript()
                    ? "arguments.GetReceiver()"
                    : "UncheckedParameter<Object>(Descriptor::kReceiver)")
            << ";\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "newTarget") {
        csa_ccfile() << "  TNode<Object> " << generated_name
                     << " = UncheckedParameter<Object>("
                     << "Descriptor::kJSNewTarget);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSAnyType()};
      } else if (param_name == "target") {
        csa_ccfile() << "  TNode<JSFunction> " << generated_name
                     << " = UncheckedParameter<JSFunction>("
                     << "Descriptor::kJSTarget);\n";
        csa_ccfile() << "  USE(" << generated_name << ");\n";
        expected_types = {TypeOracle::GetJSFunctionType()};
      } else {
        Error(
            "Unexpected implicit parameter \"", param_name,
            "\" for JavaScript calling convention, "
            "expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
            .Position(param_pos);
        expected_types = {actual_type};
      }
      if (std::find(expected_types.begin(), expected_types.end(),
                    actual_type) == expected_types.end()) {
        Error("According to JavaScript calling convention, expected parameter ",
              param_name, " to have type ", PrintList(expected_types, " or "),
              " but found type ", *actual_type)
            .Position(param_pos);
      }
    }

    for (size_t i = signature.implicit_count;
         i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
                                     &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << var << ");\n";
    }

  } else {
    DCHECK(builtin->IsStub());

    for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
      const std::string& parameter_name = signature.parameter_names[i]->value;
      const Type* type = signature.types()[i];
      const bool mark_as_used = signature.implicit_count > i;
      std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
                                     &parameter_bindings, mark_as_used);
      csa_ccfile() << "  " << type->GetGeneratedTypeName() << " " << var
                   << " = "
                   << "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
                   << ">(Descriptor::k" << CamelifyString(parameter_name)
                   << ");\n";
      csa_ccfile() << "  USE(" << var << ");\n";
    }
  }
  assembler_ = CfgAssembler(parameter_types);
  const Type* body_result = Visit(*builtin->body());
  if (body_result != TypeOracle::GetNeverType()) {
    ReportError("control reaches end of builtin, expected return of a value");
  }
  CSAGenerator csa_generator{assembler().Result(), csa_ccfile(),
                             builtin->kind()};
  csa_generator.EmitGraph(parameters);
  assembler_ = base::nullopt;
  csa_ccfile() << "}\n\n";
}

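// A variable declaration used as a statement opens its own value-binding
// scope; the overload below is used when the declaration belongs to an
// enclosing block whose bindings should own the new variable.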
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
  BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
  return Visit(stmt, &block_bindings);
}

const Type* ImplementationVisitor::Visit(
    VarDeclarationStatement* stmt, BlockBindings<LocalValue>* block_bindings) {
  // const-qualified variables are required to be initialized properly.
  if (stmt->const_qualified && !stmt->initializer) {
    ReportError("local constant \"", stmt->name, "\" is not initialized.");
  }

  base::Optional<const Type*> type;
  if (stmt->type) {
    type = TypeVisitor::ComputeType(*stmt->type);
  }
  base::Optional<VisitResult> init_result;
  if (stmt->initializer) {
    StackScope scope(this);
    init_result = Visit(*stmt->initializer);
    if (type) {
      init_result = GenerateImplicitConvert(*type, *init_result);
    }
    type = init_result->type();
    if ((*type)->IsConstexpr() && !stmt->const_qualified) {
      Error("Use 'const' instead of 'let' for variable '", stmt->name->value,
            "' of constexpr type '", (*type)->ToString(), "'.")
          .Position(stmt->name->pos)
          .Throw();
    }
    init_result = scope.Yield(*init_result);
  } else {
    DCHECK(type.has_value());
    if ((*type)->IsConstexpr()) {
      ReportError("constexpr variables need an initializer");
    }
    TypeVector lowered_types = LowerType(*type);
    for (const Type* t : lowered_types) {
      assembler().Emit(PushUninitializedInstruction{TypeOracle::GetTopType(
          "uninitialized variable '" + stmt->name->value + "' of type " +
              t->ToString() + " originally defined at " +
              PositionAsString(stmt->pos),
          t)});
    }
    init_result =
        VisitResult(*type, assembler().TopRange(lowered_types.size()));
  }
  LocationReference ref = stmt->const_qualified
                              ? LocationReference::Temporary(
                                    *init_result, "const " + stmt->name->value)
                              : LocationReference::VariableAccess(*init_result);
  block_bindings->Add(stmt->name, LocalValue{std::move(ref)});
  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(TailCallStatement* stmt) {
  return Visit(stmt->call, true).type();
}

VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack());
  Block* done_block = assembler().NewBlock();
  Block* true_conversion_block = assembler().NewBlock();
  GenerateExpressionBranch(expr->condition, true_block, false_block);

  VisitResult left;
  VisitResult right;

  {
    // The code for both arms of the conditional must be generated before the
    // conditional expression itself can be evaluated, because the common type
    // of the true and false results needs to be known in order to convert
    // both branches to that common type.
    assembler().Bind(true_block);
    StackScope left_scope(this);
    left = Visit(expr->if_true);
    assembler().Goto(true_conversion_block);

    const Type* common_type;
    {
      assembler().Bind(false_block);
      StackScope right_scope(this);
      right = Visit(expr->if_false);
      common_type = GetCommonType(left.type(), right.type());
      right = right_scope.Yield(GenerateImplicitConvert(common_type, right));
      assembler().Goto(done_block);
    }

    assembler().Bind(true_conversion_block);
    left = left_scope.Yield(GenerateImplicitConvert(common_type, left));
    assembler().Goto(done_block);
  }

  assembler().Bind(done_block);
  CHECK_EQ(left, right);
  return left;
}

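// Lower short-circuiting `||`. Constexpr bool operands fold into a C++
// `(a || b)` expression; otherwise the right-hand side is only evaluated in
// the block reached when the left-hand side is false.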
VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "||");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " || " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result = GenerateBoolConstant(true);
  assembler().Goto(done_block);

  assembler().Bind(false_block);
  VisitResult false_result;
  {
    StackScope false_block_scope(this);
    false_result = false_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
  }
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
  StackScope outer_scope(this);
  VisitResult left_result = Visit(expr->left);

  if (left_result.type()->IsConstexprBool()) {
    VisitResult right_result = Visit(expr->right);
    if (!right_result.type()->IsConstexprBool()) {
      ReportError(
          "expected type constexpr bool on right-hand side of operator "
          "&&");
    }
    return VisitResult(TypeOracle::GetConstexprBoolType(),
                       std::string("(") + left_result.constexpr_value() +
                           " && " + right_result.constexpr_value() + ")");
  }

  Block* true_block = assembler().NewBlock();
  Block* false_block = assembler().NewBlock();
  Block* done_block = assembler().NewBlock();

  left_result = GenerateImplicitConvert(TypeOracle::GetBoolType(), left_result);
  GenerateBranch(left_result, true_block, false_block);

  assembler().Bind(true_block);
  VisitResult true_result;
  {
    StackScope true_block_scope(this);
    VisitResult right_result = Visit(expr->right);
    if (TryGetSourceForBitfieldExpression(expr->left) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->right) != nullptr &&
        TryGetSourceForBitfieldExpression(expr->left)->value ==
            TryGetSourceForBitfieldExpression(expr->right)->value) {
      Lint(
          "Please use & rather than && when checking multiple bitfield "
          "values, to avoid complexity in generated code.");
    }
    true_result = true_block_scope.Yield(
        GenerateImplicitConvert(TypeOracle::GetBoolType(), right_result));
  }
  assembler().Goto(done_block);

  assembler().Bind(false_block);
  VisitResult false_result = GenerateBoolConstant(false);
  assembler().Goto(done_block);

  assembler().Bind(done_block);
  DCHECK_EQ(true_result, false_result);
  return outer_scope.Yield(true_result);
}

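// `++` and `--` are desugared into a fetch from the location, a call to the
// "+" or "-" operator with the constant 1, and a store back; postfix forms
// yield the original value, prefix forms the updated one.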
VisitResult ImplementationVisitor::Visit(IncrementDecrementExpression* expr) {
  StackScope scope(this);
  LocationReference location_ref = GetLocationReference(expr->location);
  VisitResult current_value = GenerateFetchFromLocation(location_ref);
  VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
  Arguments args;
  args.parameters = {current_value, one};
  VisitResult assignment_value = GenerateCall(
      expr->op == IncrementDecrementOperator::kIncrement ? "+" : "-", args);
  GenerateAssignToLocation(location_ref, assignment_value);
  return scope.Yield(expr->postfix ? current_value : assignment_value);
}

VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
  StackScope scope(this);
  LocationReference location_ref = GetLocationReference(expr->location);
  VisitResult assignment_value;
  if (expr->op) {
    VisitResult location_value = GenerateFetchFromLocation(location_ref);
    assignment_value = Visit(expr->value);
    Arguments args;
    args.parameters = {location_value, assignment_value};
    assignment_value = GenerateCall(*expr->op, args);
    GenerateAssignToLocation(location_ref, assignment_value);
  } else {
    assignment_value = Visit(expr->value);
    GenerateAssignToLocation(location_ref, assignment_value);
  }
  return scope.Yield(assignment_value);
}

VisitResult ImplementationVisitor::Visit(FloatingPointLiteralExpression* expr) {
  const Type* result_type = TypeOracle::GetConstFloat64Type();
  std::stringstream str;
  str << std::setprecision(std::numeric_limits<double>::digits10 + 1)
      << expr->value;
  return VisitResult{result_type, str.str()};
}

VisitResult ImplementationVisitor::Visit(IntegerLiteralExpression* expr) {
  const Type* result_type = TypeOracle::GetIntegerLiteralType();
  std::stringstream str;
  str << "IntegerLiteral("
      << (expr->value.is_negative() ? "true, 0x" : "false, 0x") << std::hex
      << expr->value.absolute_value() << std::dec << "ull)";
  return VisitResult{result_type, str.str()};
}

VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
  VisitResult result = Visit(expr->expression);
  const Type* result_type = SubtractType(
      result.type(), TypeVisitor::ComputeType(expr->excluded_type));
  if (result_type->IsNever()) {
    ReportError("unreachable code");
  }
  CHECK_EQ(LowerType(result_type), TypeVector{result_type});
  assembler().Emit(UnsafeCastInstruction{result_type});
  result.SetType(result_type);
  return result;
}

VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
  return VisitResult{
      TypeOracle::GetConstStringType(),
      "\"" + expr->literal.substr(1, expr->literal.size() - 2) + "\""};
}

VisitResult ImplementationVisitor::GetBuiltinCode(Builtin* builtin) {
  if (builtin->IsExternal() || builtin->kind() != Builtin::kStub) {
    ReportError(
        "creating function pointers is only allowed for internal builtins with "
        "stub linkage");
  }
  const Type* type = TypeOracle::GetBuiltinPointerType(
      builtin->signature().parameter_types.types,
      builtin->signature().return_type);
  assembler().Emit(
      PushBuiltinPointerInstruction{builtin->ExternalName(), type});
  return VisitResult(type, assembler().TopRange(1));
}

VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
  StackScope scope(this);
  return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}

VisitResult ImplementationVisitor::Visit(FieldAccessExpression* expr) {
  StackScope scope(this);
  LocationReference location = GetLocationReference(expr);
  if (location.IsBitFieldAccess()) {
    if (auto* identifier = IdentifierExpression::DynamicCast(expr->object)) {
      bitfield_expressions_[expr] = identifier->name;
    }
  }
  return scope.Yield(GenerateFetchFromLocation(location));
}

const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
  Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
  size_t parameter_count = label->parameter_types.size();
  if (stmt->arguments.size() != parameter_count) {
    ReportError("goto to label has incorrect number of parameters (expected ",
                parameter_count, " found ", stmt->arguments.size(), ")");
  }

  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(stmt->label->pos,
                                      label->declaration_position());
  }
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddBindingUse(stmt->label->pos, label);
  }

  size_t i = 0;
  StackRange arguments = assembler().TopRange(0);
  for (Expression* e : stmt->arguments) {
    StackScope scope(this);
    VisitResult result = Visit(e);
    const Type* parameter_type = label->parameter_types[i++];
    result = GenerateImplicitConvert(parameter_type, result);
    arguments.Extend(scope.Yield(result).stack_range());
  }

  assembler().Goto(label->block, arguments.Size());
  return TypeOracle::GetNeverType();
}

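// For constexpr conditions, emit a ConstexprBranchInstruction (a C++-level
// branch in the generated code); both arms must then agree on whether they
// reach their end. Runtime conditions lower to an ordinary two-way branch,
// where a missing else reuses the false block as the join block.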
const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
  bool has_else = stmt->if_false.has_value();

  if (stmt->is_constexpr) {
    VisitResult expression_result = Visit(stmt->condition);

    if (!(expression_result.type() == TypeOracle::GetConstexprBoolType())) {
      std::stringstream stream;
      stream << "expression should return type constexpr bool "
             << "but returns type " << *expression_result.type();
      ReportError(stream.str());
    }

    Block* true_block = assembler().NewBlock();
    Block* false_block = assembler().NewBlock();
    Block* done_block = assembler().NewBlock();

    assembler().Emit(ConstexprBranchInstruction{
        expression_result.constexpr_value(), true_block, false_block});

    assembler().Bind(true_block);
    const Type* left_result = Visit(stmt->if_true);
    if (left_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    assembler().Bind(false_block);
    const Type* right_result = TypeOracle::GetVoidType();
    if (has_else) {
      right_result = Visit(*stmt->if_false);
    }
    if (right_result == TypeOracle::GetVoidType()) {
      assembler().Goto(done_block);
    }

    if (left_result->IsNever() != right_result->IsNever()) {
      std::stringstream stream;
      stream << "either both or neither branch of a constexpr if statement "
                "must reach its end at "
             << PositionAsString(stmt->pos);
      ReportError(stream.str());
    }

    if (left_result != TypeOracle::GetNeverType()) {
      assembler().Bind(done_block);
    }
    return left_result;
  } else {
    Block* true_block = assembler().NewBlock(assembler().CurrentStack(),
                                             IsDeferred(stmt->if_true));
    Block* false_block =
        assembler().NewBlock(assembler().CurrentStack(),
                             stmt->if_false && IsDeferred(*stmt->if_false));
    GenerateExpressionBranch(stmt->condition, true_block, false_block);

    Block* done_block;
    bool live = false;
    if (has_else) {
      done_block = assembler().NewBlock();
    } else {
      done_block = false_block;
      live = true;
    }

    assembler().Bind(true_block);
    {
      const Type* result = Visit(stmt->if_true);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (has_else) {
      assembler().Bind(false_block);
      const Type* result = Visit(*stmt->if_false);
      if (result == TypeOracle::GetVoidType()) {
        live = true;
        assembler().Goto(done_block);
      }
    }

    if (live) {
      assembler().Bind(done_block);
    }
    return live ? TypeOracle::GetVoidType() : TypeOracle::GetNeverType();
  }
}

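// Loops lower to the usual three-block shape: a header that re-evaluates
// the condition, a body that jumps back to the header (break and continue
// are wired to the exit and header blocks via BreakContinueActivator), and
// an exit block.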
const Type* ImplementationVisitor::Visit(WhileStatement* stmt) {
  Block* body_block = assembler().NewBlock(assembler().CurrentStack());
  Block* exit_block = assembler().NewBlock(assembler().CurrentStack());

  Block* header_block = assembler().NewBlock();
  assembler().Goto(header_block);

  assembler().Bind(header_block);
  GenerateExpressionBranch(stmt->condition, body_block, exit_block);

  assembler().Bind(body_block);
  {
    BreakContinueActivator activator{exit_block, header_block};
    const Type* body_result = Visit(stmt->body);
    if (body_result != TypeOracle::GetNeverType()) {
      assembler().Goto(header_block);
    }
  }

  assembler().Bind(exit_block);
  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(BlockStatement* block) {
  BlockBindings<LocalValue> block_bindings(&ValueBindingsManager::Get());
  const Type* type = TypeOracle::GetVoidType();
  for (Statement* s : block->statements) {
    CurrentSourcePosition::Scope source_position(s->pos);
    if (type->IsNever()) {
      ReportError("statement after non-returning statement");
    }
    if (auto* var_declaration = VarDeclarationStatement::DynamicCast(s)) {
      type = Visit(var_declaration, &block_bindings);
    } else {
      type = Visit(s);
    }
  }
  return type;
}

const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
#if defined(DEBUG)
  assembler().Emit(PrintConstantStringInstruction{"halting because of '" +
                                                  stmt->reason + "' at " +
                                                  PositionAsString(stmt->pos)});
#endif
  assembler().Emit(AbortInstruction{stmt->never_continues
                                        ? AbortInstruction::Kind::kUnreachable
                                        : AbortInstruction::Kind::kDebugBreak});
  if (stmt->never_continues) {
    return TypeOracle::GetNeverType();
  } else {
    return TypeOracle::GetVoidType();
  }
}

namespace {

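// Collapse a possibly multi-line assert source into a single line for the
// failure message, e.g. "a ==\n      b" becomes "a == b".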
std::string FormatAssertSource(const std::string& str) {
  // Replace all whitespace characters with a space character.
  std::string str_no_newlines = str;
  std::replace_if(
      str_no_newlines.begin(), str_no_newlines.end(),
      [](unsigned char c) { return isspace(c); }, ' ');

  // str might include indentation, squash multiple space characters into one.
  std::string result;
  std::unique_copy(str_no_newlines.begin(), str_no_newlines.end(),
                   std::back_inserter(result),
                   [](char a, char b) { return a == ' ' && b == ' '; });
  return result;
}

}  // namespace

const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
  if (stmt->kind == AssertStatement::AssertKind::kStaticAssert) {
    std::string message =
        "static_assert(" + stmt->source + ") at " + ToString(stmt->pos);
    GenerateCall(QualifiedName({"", TORQUE_INTERNAL_NAMESPACE_STRING},
                               STATIC_ASSERT_MACRO_STRING),
                 Arguments{{Visit(stmt->expression),
                            VisitResult(TypeOracle::GetConstexprStringType(),
                                        StringLiteralQuote(message))},
                           {}});
    return TypeOracle::GetVoidType();
  }
  bool do_check = stmt->kind != AssertStatement::AssertKind::kDcheck ||
                  GlobalContext::force_assert_statements();
#if defined(DEBUG)
  do_check = true;
#endif
  Block* resume_block;

  if (!do_check) {
    Block* unreachable_block = assembler().NewBlock(assembler().CurrentStack());
    resume_block = assembler().NewBlock(assembler().CurrentStack());
    assembler().Goto(resume_block);
    assembler().Bind(unreachable_block);
  }

  // CSA_DCHECK & co. are not used here on purpose for two reasons. First,
  // Torque allows and handles two types of expressions in the if protocol
  // automagically, ones that return TNode<BoolT> and those that use the
  // BranchIf(..., Label* true, Label* false) idiom. Because the machinery to
  // handle this is embedded in the expression handling, the decision whether
  // to use CSA_DCHECK or CSA_DCHECK_BRANCH isn't trivial to make up-front.
  // Secondly, on failure, the assert text should be the corresponding Torque
  // code, not the -gen.cc code, which would be the case when using
  // CSA_DCHECK_XXX.
  Block* true_block = assembler().NewBlock(assembler().CurrentStack());
  Block* false_block = assembler().NewBlock(assembler().CurrentStack(), true);
  GenerateExpressionBranch(stmt->expression, true_block, false_block);

  assembler().Bind(false_block);

  assembler().Emit(AbortInstruction{
      AbortInstruction::Kind::kAssertionFailure,
      "Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});

  assembler().Bind(true_block);

  if (!do_check) {
    assembler().Bind(resume_block);
  }

  return TypeOracle::GetVoidType();
}

const Type* ImplementationVisitor::Visit(ExpressionStatement* stmt) {
  const Type* type = Visit(stmt->expression).type();
  return type->IsNever() ? type : TypeOracle::GetVoidType();
}

Visit(ReturnStatement * stmt)1267 const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
1268   Callable* current_callable = CurrentCallable::Get();
1269   if (current_callable->signature().return_type->IsNever()) {
1270     std::stringstream s;
1271     s << "cannot return from a function with return type never";
1272     ReportError(s.str());
1273   }
1274   LocalLabel* end =
1275       current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
1276   if (current_callable->HasReturnValue()) {
1277     if (!stmt->value) {
1278       std::stringstream s;
1279       s << "return expression needs to be specified for a return type of "
1280         << *current_callable->signature().return_type;
1281       ReportError(s.str());
1282     }
1283     VisitResult expression_result = Visit(*stmt->value);
1284     VisitResult return_result = GenerateImplicitConvert(
1285         current_callable->signature().return_type, expression_result);
1286     if (current_callable->IsMacro()) {
1287       if (return_result.IsOnStack()) {
1288         StackRange return_value_range =
1289             GenerateLabelGoto(end, return_result.stack_range());
1290         SetReturnValue(VisitResult(return_result.type(), return_value_range));
1291       } else {
1292         GenerateLabelGoto(end);
1293         SetReturnValue(return_result);
1294       }
1295     } else if (current_callable->IsBuiltin()) {
1296       assembler().Emit(ReturnInstruction{
1297           LoweredSlotCount(current_callable->signature().return_type)});
1298     } else {
1299       UNREACHABLE();
1300     }
1301   } else {
1302     if (stmt->value) {
1303       std::stringstream s;
1304       s << "return expression can't be specified for a void or never return "
1305            "type";
1306       ReportError(s.str());
1307     }
1308     GenerateLabelGoto(end);
1309   }
1310   current_callable->IncrementReturns();
1311   return TypeOracle::GetNeverType();
1312 }
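
// Note the asymmetry above: macros are inlined into their caller's control
// flow graph, so a `return` in a macro lowers to a goto to the shared
// macro-end label with the value left on the stack, whereas builtins own a
// frame and emit a real ReturnInstruction. Illustrative Torque (names and
// signatures are hypothetical):
//
//   macro Twice(x: int32): int32 { return x + x; }   // goto to end label
//   builtin TwiceSmi(x: Smi): Smi { return x + x; }  // ReturnInstruction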
1313 
1314 VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
1315   size_t parameter_count = expr->label_block->parameters.names.size();
1316   std::vector<VisitResult> parameters;
1317 
1318   Block* label_block = nullptr;
1319   Block* done_block = assembler().NewBlock();
1320   VisitResult try_result;
1321 
1322   {
1323     CurrentSourcePosition::Scope source_position(expr->label_block->pos);
1324     if (expr->label_block->parameters.has_varargs) {
1325       ReportError("cannot use ... for label parameters");
1326     }
1327     Stack<const Type*> label_input_stack = assembler().CurrentStack();
1328     TypeVector parameter_types;
1329     for (size_t i = 0; i < parameter_count; ++i) {
1330       const Type* type =
1331           TypeVisitor::ComputeType(expr->label_block->parameters.types[i]);
1332       parameter_types.push_back(type);
1333       if (type->IsConstexpr()) {
1334         ReportError("no constexpr type allowed for label arguments");
1335       }
1336       StackRange range = label_input_stack.PushMany(LowerType(type));
1337       parameters.push_back(VisitResult(type, range));
1338     }
1339     label_block = assembler().NewBlock(label_input_stack,
1340                                        IsDeferred(expr->label_block->body));
1341 
1342     Binding<LocalLabel> label_binding{&LabelBindingsManager::Get(),
1343                                       expr->label_block->label,
1344                                       LocalLabel{label_block, parameter_types}};
1345 
1346     // Visit try
1347     StackScope stack_scope(this);
1348     try_result = Visit(expr->try_expression);
1349     if (try_result.type() != TypeOracle::GetNeverType()) {
1350       try_result = stack_scope.Yield(try_result);
1351       assembler().Goto(done_block);
1352     }
1353   }
1354 
1355   // Visit and output the code for the label block. If the label block falls
1356   // through, then the try must not return a value. Also, if the try doesn't
1357   // fall through, but the label does, then overall the try-label block
1358   // returns type void.
1359   assembler().Bind(label_block);
1360   const Type* label_result;
1361   {
1362     BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
1363     for (size_t i = 0; i < parameter_count; ++i) {
1364       Identifier* name = expr->label_block->parameters.names[i];
1365       parameter_bindings.Add(name,
1366                              LocalValue{LocationReference::Temporary(
1367                                  parameters[i], "parameter " + name->value)});
1368     }
1369 
1370     label_result = Visit(expr->label_block->body);
1371   }
1372   if (!try_result.type()->IsVoidOrNever() && label_result->IsVoid()) {
1373     ReportError(
1374         "otherwise clauses cannot fall through in a non-void expression");
1375   }
1376   if (label_result != TypeOracle::GetNeverType()) {
1377     assembler().Goto(done_block);
1378   }
1379   if (label_result->IsVoid() && try_result.type()->IsNever()) {
1380     try_result =
1381         VisitResult(TypeOracle::GetVoidType(), try_result.stack_range());
1382   }
1383 
1384   if (!try_result.type()->IsNever()) {
1385     assembler().Bind(done_block);
1386   }
1387   return try_result;
1388 }
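
// For reference, this visitor handles Torque source of roughly this shape
// (names are illustrative only):
//
//   try {
//     result = TryFastPath(x) otherwise Slow;
//   } label Slow(fallback: Object) {
//     result = SlowPath(fallback);
//   }
//
// The try body exits to label_block through the label binding; any arm that
// falls through is merged in done_block.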
1389 
1390 VisitResult ImplementationVisitor::Visit(StatementExpression* expr) {
1391   return VisitResult{Visit(expr->statement), assembler().TopRange(0)};
1392 }
1393 
1394 InitializerResults ImplementationVisitor::VisitInitializerResults(
1395     const ClassType* class_type,
1396     const std::vector<NameAndExpression>& initializers) {
1397   InitializerResults result;
1398   for (const NameAndExpression& initializer : initializers) {
1399     result.names.push_back(initializer.name);
1400     Expression* e = initializer.expression;
1401     const Field& field = class_type->LookupField(initializer.name->value);
1402     bool has_index = field.index.has_value();
1403     if (SpreadExpression* s = SpreadExpression::DynamicCast(e)) {
1404       if (!has_index) {
1405         ReportError(
1406             "spread expressions can only be used to initialize indexed class "
1407             "fields ('",
1408             initializer.name->value, "' is not)");
1409       }
1410       e = s->spreadee;
1411     } else if (has_index) {
1412       ReportError("the indexed class field '", initializer.name->value,
1413                   "' must be initialized with a spread operator");
1414     }
1415     result.field_value_map[field.name_and_type.name] = Visit(e);
1416   }
1417   return result;
1418 }
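
// Torque-side illustration of the rule enforced above (a sketch; names are
// hypothetical): indexed fields must be initialized with a spread, and only
// indexed fields may be:
//
//   new FixedArray{ map: m, length: n, objects: ...iterator }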
1419 
1420 LocationReference ImplementationVisitor::GenerateFieldReference(
1421     VisitResult object, const Field& field, const ClassType* class_type,
1422     bool treat_optional_as_indexed) {
1423   if (field.index.has_value()) {
1424     LocationReference slice = LocationReference::HeapSlice(
1425         GenerateCall(class_type->GetSliceMacroName(field), {{object}, {}}));
1426     if (field.index->optional && !treat_optional_as_indexed) {
1427       // This field was declared using optional syntax, so any reference to it
1428       // is implicitly a reference to the first item.
1429       return GenerateReferenceToItemInHeapSlice(
1430           slice, {TypeOracle::GetConstInt31Type(), "0"});
1431     } else {
1432       return slice;
1433     }
1434   }
1435   DCHECK(field.offset.has_value());
1436   StackRange result_range = assembler().TopRange(0);
1437   result_range.Extend(GenerateCopy(object).stack_range());
1438   VisitResult offset =
1439       VisitResult(TypeOracle::GetConstInt31Type(), ToString(*field.offset));
1440   offset = GenerateImplicitConvert(TypeOracle::GetIntPtrType(), offset);
1441   result_range.Extend(offset.stack_range());
1442   const Type* type = TypeOracle::GetReferenceType(field.name_and_type.type,
1443                                                   field.const_qualified);
1444   return LocationReference::HeapReference(VisitResult(type, result_range));
1445 }
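
// On the CSA stack machine a Reference<T> is lowered to adjacent slots for
// its {object, offset} fields (a Slice<T> additionally carries a length),
// which is why the code above assembles the result range from the copied
// object followed by the intptr-converted offset.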
1446 
1447 // This is used to generate field references during initialization, where we can
1448 // re-use the offsets used for computing the allocation size.
1449 LocationReference ImplementationVisitor::GenerateFieldReferenceForInit(
1450     VisitResult object, const Field& field,
1451     const LayoutForInitialization& layout) {
1452   StackRange result_range = assembler().TopRange(0);
1453   result_range.Extend(GenerateCopy(object).stack_range());
1454   VisitResult offset = GenerateImplicitConvert(
1455       TypeOracle::GetIntPtrType(), layout.offsets.at(field.name_and_type.name));
1456   result_range.Extend(offset.stack_range());
1457   if (field.index) {
1458     VisitResult length =
1459         GenerateCopy(layout.array_lengths.at(field.name_and_type.name));
1460     result_range.Extend(length.stack_range());
1461     const Type* slice_type =
1462         TypeOracle::GetMutableSliceType(field.name_and_type.type);
1463     return LocationReference::HeapSlice(VisitResult(slice_type, result_range));
1464   } else {
1465     // Const fields are writable during initialization.
1466     VisitResult heap_reference(
1467         TypeOracle::GetMutableReferenceType(field.name_and_type.type),
1468         result_range);
1469     return LocationReference::HeapReference(heap_reference);
1470   }
1471 }
1472 
1473 void ImplementationVisitor::InitializeClass(
1474     const ClassType* class_type, VisitResult allocate_result,
1475     const InitializerResults& initializer_results,
1476     const LayoutForInitialization& layout) {
1477   if (const ClassType* super = class_type->GetSuperClass()) {
1478     InitializeClass(super, allocate_result, initializer_results, layout);
1479   }
1480 
1481   for (Field f : class_type->fields()) {
1482     VisitResult initializer_value =
1483         initializer_results.field_value_map.at(f.name_and_type.name);
1484     LocationReference field =
1485         GenerateFieldReferenceForInit(allocate_result, f, layout);
1486     if (f.index) {
1487       DCHECK(field.IsHeapSlice());
1488       VisitResult slice = field.GetVisitResult();
1489       GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
1490                                  "InitializeFieldsFromIterator"),
1491                    {{slice, initializer_value}, {}});
1492     } else {
1493       GenerateAssignToLocation(field, initializer_value);
1494     }
1495   }
1496 }
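
// Initialization order above: superclass fields first (recursively), then
// this class's fields in declaration order; indexed fields are filled by
// torque_internal::InitializeFieldsFromIterator from the spread value,
// everything else by a plain store through the freshly created reference.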
1497 
1498 VisitResult ImplementationVisitor::GenerateArrayLength(
1499     Expression* array_length, Namespace* nspace,
1500     const std::map<std::string, LocalValue>& bindings) {
1501   StackScope stack_scope(this);
1502   CurrentSourcePosition::Scope pos_scope(array_length->pos);
1503   // Switch to the namespace where the class was declared.
1504   CurrentScope::Scope current_scope_scope(nspace);
1505   // Reset local bindings and install local binding for the preceding fields.
1506   BindingsManagersScope bindings_managers_scope;
1507   BlockBindings<LocalValue> field_bindings(&ValueBindingsManager::Get());
1508   for (auto& p : bindings) {
1509     field_bindings.Add(p.first, LocalValue{p.second}, true);
1510   }
1511   VisitResult length = Visit(array_length);
1512   VisitResult converted_length =
1513       GenerateCall("Convert", Arguments{{length}, {}},
1514                    {TypeOracle::GetIntPtrType(), length.type()}, false);
1515   return stack_scope.Yield(converted_length);
1516 }
1517 
1518 VisitResult ImplementationVisitor::GenerateArrayLength(VisitResult object,
1519                                                        const Field& field) {
1520   DCHECK(field.index);
1521 
1522   StackScope stack_scope(this);
1523   const ClassType* class_type = *object.type()->ClassSupertype();
1524   std::map<std::string, LocalValue> bindings;
1525   bool before_current = true;
1526   for (Field f : class_type->ComputeAllFields()) {
1527     if (field.name_and_type.name == f.name_and_type.name) {
1528       before_current = false;
1529     }
1530     // We can't generate field references eagerly here, because some preceding
1531     // fields might be optional, and attempting to get a reference to an
1532     // optional field can crash the program if the field isn't present.
1533     // Instead, we use the lazy form of LocalValue to only generate field
1534     // references if they are used in the length expression.
1535     bindings.insert(
1536         {f.name_and_type.name,
1537          f.const_qualified
1538              ? (before_current
1539                     ? LocalValue{[=]() {
1540                         return GenerateFieldReference(object, f, class_type);
1541                       }}
1542                     : LocalValue("Array lengths may only refer to fields "
1543                                  "defined earlier"))
1544              : LocalValue(
1545                    "Non-const fields cannot be used for array lengths.")});
1546   }
1547   return stack_scope.Yield(
1548       GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
1549 }
1550 
1551 VisitResult ImplementationVisitor::GenerateArrayLength(
1552     const ClassType* class_type, const InitializerResults& initializer_results,
1553     const Field& field) {
1554   DCHECK(field.index);
1555 
1556   StackScope stack_scope(this);
1557   std::map<std::string, LocalValue> bindings;
1558   for (Field f : class_type->ComputeAllFields()) {
1559     if (f.index) break;
1560     const std::string& fieldname = f.name_and_type.name;
1561     VisitResult value = initializer_results.field_value_map.at(fieldname);
1562     bindings.insert(
1563         {fieldname,
1564          f.const_qualified
1565              ? LocalValue{LocationReference::Temporary(
1566                    value, "initial field " + fieldname)}
1567              : LocalValue(
1568                    "Non-const fields cannot be used for array lengths.")});
1569   }
1570   return stack_scope.Yield(
1571       GenerateArrayLength(field.index->expr, class_type->nspace(), bindings));
1572 }
1573 
1574 LayoutForInitialization ImplementationVisitor::GenerateLayoutForInitialization(
1575     const ClassType* class_type,
1576     const InitializerResults& initializer_results) {
1577   LayoutForInitialization layout;
1578   VisitResult offset;
1579   for (Field f : class_type->ComputeAllFields()) {
1580     if (f.offset.has_value()) {
1581       offset =
1582           VisitResult(TypeOracle::GetConstInt31Type(), ToString(*f.offset));
1583     }
1584     layout.offsets[f.name_and_type.name] = offset;
1585     if (f.index) {
1586       size_t element_size;
1587       std::string element_size_string;
1588       std::tie(element_size, element_size_string) =
1589           *SizeOf(f.name_and_type.type);
1590       VisitResult array_element_size =
1591           VisitResult(TypeOracle::GetConstInt31Type(), element_size_string);
1592       VisitResult array_length =
1593           GenerateArrayLength(class_type, initializer_results, f);
1594       layout.array_lengths[f.name_and_type.name] = array_length;
1595       Arguments arguments;
1596       arguments.parameters = {offset, array_length, array_element_size};
1597       offset = GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
1598                                           "AddIndexedFieldSizeToObjectSize"),
1599                             arguments);
1600     } else {
1601       DCHECK(f.offset.has_value());
1602     }
1603   }
1604   if (class_type->size().SingleValue()) {
1605     layout.size = VisitResult(TypeOracle::GetConstInt31Type(),
1606                               ToString(*class_type->size().SingleValue()));
1607   } else {
1608     layout.size = offset;
1609   }
1610   if ((size_t{1} << class_type->size().AlignmentLog2()) <
1611       TargetArchitecture::TaggedSize()) {
1612     Arguments arguments;
1613     arguments.parameters = {layout.size};
1614     layout.size = GenerateCall(
1615         QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AlignTagged"),
1616         arguments);
1617   }
1618   return layout;
1619 }
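
// Net effect of the loop above, as a sketch (not actual generated code):
//
//   size = <end of last fixed field>;
//   for each indexed field f:
//     size += length(f) * element_size(f);  // AddIndexedFieldSizeToObjectSize
//   if (class alignment < kTaggedSize) size = AlignTagged(size);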
1620 
1621 VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
1622   StackScope stack_scope(this);
1623   const Type* type = TypeVisitor::ComputeType(expr->type);
1624   const ClassType* class_type = ClassType::DynamicCast(type);
1625   if (class_type == nullptr) {
1626     ReportError("type for new expression must be a class, \"", *type,
1627                 "\" is not");
1628   }
1629 
1630   if (!class_type->AllowInstantiation()) {
1631     // Classes that are only used for testing should never be instantiated.
1632     ReportError(*class_type,
1633                 " cannot be allocated with new (it's used for testing)");
1634   }
1635 
1636   InitializerResults initializer_results =
1637       VisitInitializerResults(class_type, expr->initializers);
1638 
1639   const Field& map_field = class_type->LookupField("map");
1640   if (*map_field.offset != 0) {
1641     ReportError("class initializers must have a map as first parameter");
1642   }
1643   const std::map<std::string, VisitResult>& initializer_fields =
1644       initializer_results.field_value_map;
1645   auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
1646   VisitResult object_map;
1647   if (class_type->IsExtern()) {
1648     if (it_object_map == initializer_fields.end()) {
1649       ReportError("Constructor for ", class_type->name(),
1650                   " needs Map argument!");
1651     }
1652     object_map = it_object_map->second;
1653   } else {
1654     if (it_object_map != initializer_fields.end()) {
1655       ReportError(
1656           "Constructor for ", class_type->name(),
1657           " must not specify Map argument; it is automatically inserted.");
1658     }
1659     Arguments get_struct_map_arguments;
1660     get_struct_map_arguments.parameters.push_back(
1661         VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
1662                     CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
1663     object_map = GenerateCall(
1664         QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "GetInstanceTypeMap"),
1665         get_struct_map_arguments, {}, false);
1666     CurrentSourcePosition::Scope current_pos(expr->pos);
1667     initializer_results.names.insert(initializer_results.names.begin(),
1668                                      MakeNode<Identifier>("map"));
1669     initializer_results.field_value_map[map_field.name_and_type.name] =
1670         object_map;
1671   }
1672 
1673   CheckInitializersWellformed(class_type->name(),
1674                               class_type->ComputeAllFields(),
1675                               expr->initializers, !class_type->IsExtern());
1676 
1677   LayoutForInitialization layout =
1678       GenerateLayoutForInitialization(class_type, initializer_results);
1679 
1680   Arguments allocate_arguments;
1681   allocate_arguments.parameters.push_back(layout.size);
1682   allocate_arguments.parameters.push_back(object_map);
1683   allocate_arguments.parameters.push_back(
1684       GenerateBoolConstant(expr->pretenured));
1685   VisitResult allocate_result = GenerateCall(
1686       QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING}, "AllocateFromNew"),
1687       allocate_arguments, {class_type}, false);
1688   DCHECK(allocate_result.IsOnStack());
1689 
1690   InitializeClass(class_type, allocate_result, initializer_results, layout);
1691 
1692   return stack_scope.Yield(GenerateCall(
1693       "%RawDownCast", Arguments{{allocate_result}, {}}, {class_type}));
1694 }
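
// Overall allocation protocol implemented above:
//   1. evaluate the initializers (the map comes first; for non-extern
//      classes it is synthesized from the instance type),
//   2. compute the layout: field offsets, array lengths, total size,
//   3. call torque_internal::AllocateFromNew(size, map, pretenured),
//   4. initialize every field, then %RawDownCast the result to the class.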
1695 
1696 const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
1697   base::Optional<Binding<LocalLabel>*> break_label =
1698       TryLookupLabel(kBreakLabelName);
1699   if (!break_label) {
1700     ReportError("break used outside of loop");
1701   }
1702   assembler().Goto((*break_label)->block);
1703   return TypeOracle::GetNeverType();
1704 }
1705 
1706 const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
1707   base::Optional<Binding<LocalLabel>*> continue_label =
1708       TryLookupLabel(kContinueLabelName);
1709   if (!continue_label) {
1710     ReportError("continue used outside of loop");
1711   }
1712   assembler().Goto((*continue_label)->block);
1713   return TypeOracle::GetNeverType();
1714 }
1715 
1716 const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
1717   BlockBindings<LocalValue> loop_bindings(&ValueBindingsManager::Get());
1718 
1719   if (stmt->var_declaration) Visit(*stmt->var_declaration, &loop_bindings);
1720 
1721   Block* body_block = assembler().NewBlock(assembler().CurrentStack());
1722   Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
1723 
1724   Block* header_block = assembler().NewBlock();
1725   assembler().Goto(header_block);
1726   assembler().Bind(header_block);
1727 
1728   // The continue label is where "continue" statements jump to. If no action
1729   // expression is provided, we jump directly to the header.
1730   Block* continue_block = header_block;
1731 
1732   // The action label is only needed when an action expression was provided.
1733   Block* action_block = nullptr;
1734   if (stmt->action) {
1735     action_block = assembler().NewBlock();
1736 
1737     // The action expression needs to be executed on a continue.
1738     continue_block = action_block;
1739   }
1740 
1741   if (stmt->test) {
1742     GenerateExpressionBranch(*stmt->test, body_block, exit_block);
1743   } else {
1744     assembler().Goto(body_block);
1745   }
1746 
1747   assembler().Bind(body_block);
1748   {
1749     BreakContinueActivator activator(exit_block, continue_block);
1750     const Type* body_result = Visit(stmt->body);
1751     if (body_result != TypeOracle::GetNeverType()) {
1752       assembler().Goto(continue_block);
1753     }
1754   }
1755 
1756   if (stmt->action) {
1757     assembler().Bind(action_block);
1758     const Type* action_result = Visit(*stmt->action);
1759     if (action_result != TypeOracle::GetNeverType()) {
1760       assembler().Goto(header_block);
1761     }
1762   }
1763 
1764   assembler().Bind(exit_block);
1765   return TypeOracle::GetVoidType();
1766 }
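
// Control-flow graph constructed above, for reference:
//
//   header --test true--> body --> (action, if any) --> header
//   header --test false--> exit
//
// `break` targets the exit block; `continue` targets the action block when
// present, otherwise the header.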
1767 
1768 VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
1769   ReportError(
1770       "spread operators are only currently supported in indexed class field "
1771       "initialization expressions");
1772 }
1773 
1774 void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
1775   for (SourceId file : SourceFileMap::AllSources()) {
1776     std::string base_filename =
1777         dir + "/" + SourceFileMap::PathFromV8RootWithoutExtension(file);
1778     GlobalContext::PerFileStreams& streams =
1779         GlobalContext::GeneratedPerFile(file);
1780 
1781     std::string csa_cc = streams.csa_ccfile.str();
1782     // Insert missing builtin includes where the marker is.
1783     {
1784       auto pos = csa_cc.find(BuiltinIncludesMarker);
1785       CHECK_NE(pos, std::string::npos);
1786       std::string includes;
1787       for (const SourceId& include : streams.required_builtin_includes) {
1788         std::string include_file =
1789             SourceFileMap::PathFromV8RootWithoutExtension(include);
1790         includes += "#include \"torque-generated/";
1791         includes += include_file;
1792         includes += "-tq-csa.h\"\n";
1793       }
1794       csa_cc.replace(pos, strlen(BuiltinIncludesMarker), std::move(includes));
1795     }
1796 
1797     // TODO(torque-builder): Pass file directly.
1798     WriteFile(base_filename + "-tq-csa.cc", std::move(csa_cc));
1799     WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
1800     WriteFile(base_filename + "-tq.inc",
1801               streams.class_definition_headerfile.str());
1802     WriteFile(
1803         base_filename + "-tq-inl.inc",
1804         streams.class_definition_inline_headerfile_macro_declarations.str() +
1805             streams.class_definition_inline_headerfile_macro_definitions.str() +
1806             streams.class_definition_inline_headerfile.str());
1807     WriteFile(base_filename + "-tq.cc", streams.class_definition_ccfile.str());
1808   }
1809 
1810   WriteFile(dir + "/debug-macros.h", debug_macros_h_.str());
1811   WriteFile(dir + "/debug-macros.cc", debug_macros_cc_.str());
1812 }
1813 
1814 cpp::Function ImplementationVisitor::GenerateMacroFunctionDeclaration(
1815     Macro* macro) {
1816   return GenerateFunction(nullptr,
1817                           output_type_ == OutputType::kCC
1818                               ? macro->CCName()
1819                               : output_type_ == OutputType::kCCDebug
1820                                     ? macro->CCDebugName()
1821                                     : macro->ExternalName(),
1822                           macro->signature(), macro->parameter_names());
1823 }
1824 
1825 cpp::Function ImplementationVisitor::GenerateFunction(
1826     cpp::Class* owner, const std::string& name, const Signature& signature,
1827     const NameVector& parameter_names, bool pass_code_assembler_state,
1828     std::vector<std::string>* generated_parameter_names) {
1829   cpp::Function f(owner, name);
1830   f.SetInline(output_type_ == OutputType::kCC);
1831 
1832   // Set return type.
1833   // TODO(torque-builder): Consider an overload of SetReturnType that handles
1834   // this.
1835   if (signature.return_type->IsVoidOrNever()) {
1836     f.SetReturnType("void");
1837   } else if (output_type_ == OutputType::kCCDebug) {
1838     f.SetReturnType(std::string("Value<") +
1839                     signature.return_type->GetDebugType() + ">");
1840   } else if (output_type_ == OutputType::kCC) {
1841     f.SetReturnType(signature.return_type->GetRuntimeType());
1842   } else {
1843     DCHECK_EQ(output_type_, OutputType::kCSA);
1844     f.SetReturnType(signature.return_type->GetGeneratedTypeName());
1845   }
1846 
1847   bool ignore_first_parameter = true;
1848   if (output_type_ == OutputType::kCCDebug) {
1849     f.AddParameter("d::MemoryAccessor", "accessor");
1850   } else if (output_type_ == OutputType::kCSA && pass_code_assembler_state) {
1851     f.AddParameter("compiler::CodeAssemblerState*", "state_");
1852   } else {
1853     ignore_first_parameter = false;
1854   }
1855 
1856   // TODO(torque-builder): Consider an overload for AddParameter that handles
1857   // this.
1858   DCHECK_GE(signature.types().size(), parameter_names.size());
1859   for (std::size_t i = 0; i < signature.types().size(); ++i) {
1860     const Type* parameter_type = signature.types()[i];
1861     std::string type;
1862     if (output_type_ == OutputType::kCC) {
1863       type = parameter_type->GetRuntimeType();
1864     } else if (output_type_ == OutputType::kCCDebug) {
1865       type = parameter_type->GetDebugType();
1866     } else {
1867       DCHECK_EQ(output_type_, OutputType::kCSA);
1868       type = parameter_type->GetGeneratedTypeName();
1869     }
1870     f.AddParameter(std::move(type),
1871                    ExternalParameterName(i < parameter_names.size()
1872                                              ? parameter_names[i]->value
1873                                              : std::to_string(i)));
1874   }
1875 
1876   for (const LabelDeclaration& label_info : signature.labels) {
1877     if (output_type_ == OutputType::kCC ||
1878         output_type_ == OutputType::kCCDebug) {
1879       ReportError("Macros that generate runtime code can't have label exits");
1880     }
1881     f.AddParameter("compiler::CodeAssemblerLabel*",
1882                    ExternalLabelName(label_info.name->value));
1883     size_t i = 0;
1884     for (const Type* type : label_info.types) {
1885       std::string generated_type_name;
1886       if (type->StructSupertype()) {
1887         generated_type_name = "\n#error no structs allowed in labels\n";
1888       } else {
1889         generated_type_name = "compiler::TypedCodeAssemblerVariable<";
1890         generated_type_name += type->GetGeneratedTNodeTypeName();
1891         generated_type_name += ">*";
1892       }
1893       f.AddParameter(generated_type_name,
1894                      ExternalLabelParameterName(label_info.name->value, i));
1895       ++i;
1896     }
1897   }
1898 
1899   if (generated_parameter_names) {
1900     *generated_parameter_names = f.GetParameterNames();
1901     if (ignore_first_parameter) {
1902       DCHECK(!generated_parameter_names->empty());
1903       generated_parameter_names->erase(generated_parameter_names->begin());
1904     }
1905   }
1906   return f;
1907 }
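
// For a CSA macro `Foo(o: Object): Smi labels Bailout` (hypothetical), the
// declaration produced above looks roughly like:
//
//   TNode<Smi> Foo(compiler::CodeAssemblerState* state_, TNode<Object> p_o,
//                  compiler::CodeAssemblerLabel* label_Bailout);
//
// The exact parameter and label prefixes come from ExternalParameterName
// and ExternalLabelName.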
1908 
1909 namespace {
1910 
1911 void FailCallableLookup(
1912     const std::string& reason, const QualifiedName& name,
1913     const TypeVector& parameter_types,
1914     const std::vector<Binding<LocalLabel>*>& labels,
1915     const std::vector<Signature>& candidates,
1916     const std::vector<std::pair<GenericCallable*, std::string>>&
1917         inapplicable_generics) {
1918   std::stringstream stream;
1919   stream << "\n" << reason << ": \n  " << name << "(" << parameter_types << ")";
1920   if (labels.size() != 0) {
1921     stream << " labels ";
1922     for (size_t i = 0; i < labels.size(); ++i) {
1923       stream << labels[i]->name() << "(" << labels[i]->parameter_types << ")";
1924     }
1925   }
1926   stream << "\ncandidates are:";
1927   for (const Signature& signature : candidates) {
1928     stream << "\n  " << name;
1929     PrintSignature(stream, signature, false);
1930   }
1931   if (inapplicable_generics.size() != 0) {
1932     stream << "\nfailed to instantiate all of these generic declarations:";
1933     for (auto& failure : inapplicable_generics) {
1934       GenericCallable* generic = failure.first;
1935       const std::string& fail_reason = failure.second;
1936       stream << "\n  " << generic->name() << " defined at "
1937              << PositionAsString(generic->Position()) << ":\n    "
1938              << fail_reason << "\n";
1939     }
1940   }
1941   ReportError(stream.str());
1942 }
1943 
1944 Callable* GetOrCreateSpecialization(
1945     const SpecializationKey<GenericCallable>& key) {
1946   if (base::Optional<Callable*> specialization =
1947           key.generic->GetSpecialization(key.specialized_types)) {
1948     return *specialization;
1949   }
1950   return DeclarationVisitor::SpecializeImplicit(key);
1951 }
1952 
1953 }  // namespace
1954 
1955 base::Optional<Binding<LocalValue>*> ImplementationVisitor::TryLookupLocalValue(
1956     const std::string& name) {
1957   return ValueBindingsManager::Get().TryLookup(name);
1958 }
1959 
1960 base::Optional<Binding<LocalLabel>*> ImplementationVisitor::TryLookupLabel(
1961     const std::string& name) {
1962   return LabelBindingsManager::Get().TryLookup(name);
1963 }
1964 
1965 Binding<LocalLabel>* ImplementationVisitor::LookupLabel(
1966     const std::string& name) {
1967   base::Optional<Binding<LocalLabel>*> label = TryLookupLabel(name);
1968   if (!label) ReportError("cannot find label ", name);
1969   return *label;
1970 }
1971 
1972 Block* ImplementationVisitor::LookupSimpleLabel(const std::string& name) {
1973   LocalLabel* label = LookupLabel(name);
1974   if (!label->parameter_types.empty()) {
1975     ReportError("label ", name,
1976                 "was expected to have no parameters, but has parameters (",
1977                 label->parameter_types, ")");
1978   }
1979   return label->block;
1980 }
1981 
1982 // Try to lookup a callable with the provided argument types. Do not report
1983 // an error if no matching callable was found, but return false instead.
1984 // This is used to test the presence of overloaded field accessors.
1985 bool ImplementationVisitor::TestLookupCallable(
1986     const QualifiedName& name, const TypeVector& parameter_types) {
1987   return LookupCallable(name, Declarations::TryLookup(name), parameter_types,
1988                         {}, {}, true) != nullptr;
1989 }
1990 
1991 TypeArgumentInference ImplementationVisitor::InferSpecializationTypes(
1992     GenericCallable* generic, const TypeVector& explicit_specialization_types,
1993     const TypeVector& explicit_arguments) {
1994   std::vector<base::Optional<const Type*>> all_arguments;
1995   const ParameterList& parameters = generic->declaration()->parameters;
1996   for (size_t i = 0; i < parameters.implicit_count; ++i) {
1997     base::Optional<Binding<LocalValue>*> val =
1998         TryLookupLocalValue(parameters.names[i]->value);
1999     all_arguments.push_back(
2000         val ? (*val)->GetLocationReference(*val).ReferencedType()
2001             : base::nullopt);
2002   }
2003   for (const Type* explicit_argument : explicit_arguments) {
2004     all_arguments.push_back(explicit_argument);
2005   }
2006   return generic->InferSpecializationTypes(explicit_specialization_types,
2007                                            all_arguments);
2008 }
2009 
2010 template <class Container>
2011 Callable* ImplementationVisitor::LookupCallable(
2012     const QualifiedName& name, const Container& declaration_container,
2013     const TypeVector& parameter_types,
2014     const std::vector<Binding<LocalLabel>*>& labels,
2015     const TypeVector& specialization_types, bool silence_errors) {
2016   Callable* result = nullptr;
2017 
2018   std::vector<Declarable*> overloads;
2019   std::vector<Signature> overload_signatures;
2020   std::vector<std::pair<GenericCallable*, std::string>> inapplicable_generics;
2021   for (auto* declarable : declaration_container) {
2022     if (GenericCallable* generic = GenericCallable::DynamicCast(declarable)) {
2023       TypeArgumentInference inference = InferSpecializationTypes(
2024           generic, specialization_types, parameter_types);
2025       if (inference.HasFailed()) {
2026         inapplicable_generics.push_back(
2027             std::make_pair(generic, inference.GetFailureReason()));
2028         continue;
2029       }
2030       overloads.push_back(generic);
2031       overload_signatures.push_back(
2032           DeclarationVisitor::MakeSpecializedSignature(
2033               SpecializationKey<GenericCallable>{generic,
2034                                                  inference.GetResult()}));
2035     } else if (Callable* callable = Callable::DynamicCast(declarable)) {
2036       overloads.push_back(callable);
2037       overload_signatures.push_back(callable->signature());
2038     }
2039   }
2040   // Indices of candidates in overloads/overload_signatures.
2041   std::vector<size_t> candidates;
2042   for (size_t i = 0; i < overloads.size(); ++i) {
2043     const Signature& signature = overload_signatures[i];
2044     if (IsCompatibleSignature(signature, parameter_types, labels.size())) {
2045       candidates.push_back(i);
2046     }
2047   }
2048 
2049   if (overloads.empty() && inapplicable_generics.empty()) {
2050     if (silence_errors) return nullptr;
2051     std::stringstream stream;
2052     stream << "no matching declaration found for " << name;
2053     ReportError(stream.str());
2054   } else if (candidates.empty()) {
2055     if (silence_errors) return nullptr;
2056     FailCallableLookup("cannot find suitable callable with name", name,
2057                        parameter_types, labels, overload_signatures,
2058                        inapplicable_generics);
2059   }
2060 
2061   auto is_better_candidate = [&](size_t a, size_t b) {
2062     return ParameterDifference(overload_signatures[a].GetExplicitTypes(),
2063                                parameter_types)
2064         .StrictlyBetterThan(ParameterDifference(
2065             overload_signatures[b].GetExplicitTypes(), parameter_types));
2066   };
2067 
2068   size_t best = *std::min_element(candidates.begin(), candidates.end(),
2069                                   is_better_candidate);
2070   // This check is contained in libstdc++'s std::min_element.
2071   DCHECK(!is_better_candidate(best, best));
2072   for (size_t candidate : candidates) {
2073     if (candidate != best && !is_better_candidate(best, candidate)) {
2074       std::vector<Signature> candidate_signatures;
2075       candidate_signatures.reserve(candidates.size());
2076       for (size_t i : candidates) {
2077         candidate_signatures.push_back(overload_signatures[i]);
2078       }
2079       FailCallableLookup("ambiguous callable ", name, parameter_types, labels,
2080                          candidate_signatures, inapplicable_generics);
2081     }
2082   }
2083 
2084   if (GenericCallable* generic =
2085           GenericCallable::DynamicCast(overloads[best])) {
2086     TypeArgumentInference inference = InferSpecializationTypes(
2087         generic, specialization_types, parameter_types);
2088     result = GetOrCreateSpecialization(
2089         SpecializationKey<GenericCallable>{generic, inference.GetResult()});
2090   } else {
2091     result = Callable::cast(overloads[best]);
2092   }
2093 
2094   size_t caller_size = parameter_types.size();
2095   size_t callee_size =
2096       result->signature().types().size() - result->signature().implicit_count;
2097   if (caller_size != callee_size &&
2098       !result->signature().parameter_types.var_args) {
2099     std::stringstream stream;
2100     stream << "parameter count mismatch calling " << *result << " - expected "
2101            << std::to_string(callee_size) << ", found "
2102            << std::to_string(caller_size);
2103     ReportError(stream.str());
2104   }
2105 
2106   return result;
2107 }
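
// Overload resolution in short: every plain callable and every generic whose
// type arguments can be inferred contributes an overload; compatible
// signatures become candidates; ParameterDifference induces a strict partial
// order and the lookup succeeds only if one candidate is strictly better
// than every other. E.g. with overloads F(Smi) and F(Number), a call F(s)
// where s: Smi resolves to F(Smi), the strictly closer match.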
2108 
2109 template <class Container>
2110 Callable* ImplementationVisitor::LookupCallable(
2111     const QualifiedName& name, const Container& declaration_container,
2112     const Arguments& arguments, const TypeVector& specialization_types) {
2113   return LookupCallable(name, declaration_container,
2114                         arguments.parameters.ComputeTypeVector(),
2115                         arguments.labels, specialization_types);
2116 }
2117 
2118 Method* ImplementationVisitor::LookupMethod(
2119     const std::string& name, const AggregateType* receiver_type,
2120     const Arguments& arguments, const TypeVector& specialization_types) {
2121   TypeVector types(arguments.parameters.ComputeTypeVector());
2122   types.insert(types.begin(), receiver_type);
2123   return Method::cast(LookupCallable({{}, name}, receiver_type->Methods(name),
2124                                      types, arguments.labels,
2125                                      specialization_types));
2126 }
2127 
2128 const Type* ImplementationVisitor::GetCommonType(const Type* left,
2129                                                  const Type* right) {
2130   const Type* common_type;
2131   if (IsAssignableFrom(left, right)) {
2132     common_type = left;
2133   } else if (IsAssignableFrom(right, left)) {
2134     common_type = right;
2135   } else {
2136     common_type = TypeOracle::GetUnionType(left, right);
2137   }
2138   common_type = common_type->NonConstexprVersion();
2139   return common_type;
2140 }
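
// Example, assuming the standard base.tq hierarchy: for Smi and HeapNumber
// neither side is assignable from the other, so the result is the union
// Smi|HeapNumber, i.e. Number; the constexpr version is then stripped so
// that both branches of a conditional yield a runtime value.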
2141 
2142 VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
2143   if (to_copy.IsOnStack()) {
2144     return VisitResult(to_copy.type(),
2145                        assembler().Peek(to_copy.stack_range(), to_copy.type()));
2146   }
2147   return to_copy;
2148 }
2149 
2150 VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
2151   StackScope stack_scope(this);
2152 
2153   auto& initializers = expr->initializers;
2154   std::vector<VisitResult> values;
2155   std::vector<const Type*> term_argument_types;
2156   values.reserve(initializers.size());
2157   term_argument_types.reserve(initializers.size());
2158 
2159   // Compute values and types of all initializer arguments
2160   for (const NameAndExpression& initializer : initializers) {
2161     VisitResult value = Visit(initializer.expression);
2162     values.push_back(value);
2163     term_argument_types.push_back(value.type());
2164   }
2165 
2166   // Compute and check struct type from given struct name and argument types
2167   const Type* type = TypeVisitor::ComputeTypeForStructExpression(
2168       expr->type, term_argument_types);
2169   if (const auto* struct_type = StructType::DynamicCast(type)) {
2170     CheckInitializersWellformed(struct_type->name(), struct_type->fields(),
2171                                 initializers);
2172 
2173     // Implicitly convert values and thereby build the struct on the stack
2174     StackRange struct_range = assembler().TopRange(0);
2175     auto& fields = struct_type->fields();
2176     for (size_t i = 0; i < values.size(); i++) {
2177       values[i] =
2178           GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
2179       struct_range.Extend(values[i].stack_range());
2180     }
2181 
2182     return stack_scope.Yield(VisitResult(struct_type, struct_range));
2183   } else {
2184     const auto* bitfield_struct_type = BitFieldStructType::cast(type);
2185     CheckInitializersWellformed(bitfield_struct_type->name(),
2186                                 bitfield_struct_type->fields(), initializers);
2187 
2188     // Create a zero and cast it to the desired bitfield struct type.
2189     VisitResult result{TypeOracle::GetConstInt32Type(), "0"};
2190     result = GenerateImplicitConvert(TypeOracle::GetInt32Type(), result);
2191     result = GenerateCall("Unsigned", Arguments{{result}, {}}, {});
2192     result = GenerateCall("%RawDownCast", Arguments{{result}, {}},
2193                           {bitfield_struct_type});
2194 
2195     // Set each field in the result. If these fields are constexpr, then all of
2196     // this initialization will end up reduced to a single value during TurboFan
2197     // optimization.
2198     auto& fields = bitfield_struct_type->fields();
2199     for (size_t i = 0; i < values.size(); i++) {
2200       values[i] =
2201           GenerateImplicitConvert(fields[i].name_and_type.type, values[i]);
2202       result = GenerateSetBitField(bitfield_struct_type, fields[i], result,
2203                                    values[i], /*starts_as_zero=*/true);
2204     }
2205 
2206     return stack_scope.Yield(result);
2207   }
2208 }
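
// The bitfield branch above builds the value out of a zero constant and one
// GenerateSetBitField per field; since starts_as_zero is true, the emitted
// stores may skip clearing the previous bits, and for constexpr inputs the
// whole sequence constant-folds during TurboFan optimization.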
2209 
2210 VisitResult ImplementationVisitor::GenerateSetBitField(
2211     const Type* bitfield_struct_type, const BitField& bitfield,
2212     VisitResult bitfield_struct, VisitResult value, bool starts_as_zero) {
2213   GenerateCopy(bitfield_struct);
2214   GenerateCopy(value);
2215   assembler().Emit(
2216       StoreBitFieldInstruction{bitfield_struct_type, bitfield, starts_as_zero});
2217   return VisitResult(bitfield_struct_type, assembler().TopRange(1));
2218 }
2219 
2220 LocationReference ImplementationVisitor::GetLocationReference(
2221     Expression* location) {
2222   switch (location->kind) {
2223     case AstNode::Kind::kIdentifierExpression:
2224       return GetLocationReference(static_cast<IdentifierExpression*>(location));
2225     case AstNode::Kind::kFieldAccessExpression:
2226       return GetLocationReference(
2227           static_cast<FieldAccessExpression*>(location));
2228     case AstNode::Kind::kElementAccessExpression:
2229       return GetLocationReference(
2230           static_cast<ElementAccessExpression*>(location));
2231     case AstNode::Kind::kDereferenceExpression:
2232       return GetLocationReference(
2233           static_cast<DereferenceExpression*>(location));
2234     default:
2235       return LocationReference::Temporary(Visit(location), "expression");
2236   }
2237 }
2238 
2239 LocationReference ImplementationVisitor::GetLocationReference(
2240     FieldAccessExpression* expr) {
2241   return GenerateFieldAccess(GetLocationReference(expr->object),
2242                              expr->field->value, false, expr->field->pos);
2243 }
2244 
2245 LocationReference ImplementationVisitor::GenerateFieldAccess(
2246     LocationReference reference, const std::string& fieldname,
2247     bool ignore_struct_field_constness, base::Optional<SourcePosition> pos) {
2248   if (reference.IsVariableAccess() &&
2249       reference.variable().type()->StructSupertype()) {
2250     const StructType* type = *reference.variable().type()->StructSupertype();
2251     const Field& field = type->LookupField(fieldname);
2252     if (GlobalContext::collect_language_server_data() && pos.has_value()) {
2253       LanguageServerData::AddDefinition(*pos, field.pos);
2254     }
2255     if (GlobalContext::collect_kythe_data() && pos.has_value()) {
2256       KytheData::AddClassFieldUse(*pos, &field);
2257     }
2258     if (field.const_qualified) {
2259       VisitResult t_value = ProjectStructField(reference.variable(), fieldname);
2260       return LocationReference::Temporary(
2261           t_value, "for constant field '" + field.name_and_type.name + "'");
2262     } else {
2263       return LocationReference::VariableAccess(
2264           ProjectStructField(reference.variable(), fieldname));
2265     }
2266   }
2267   if (reference.IsTemporary() &&
2268       reference.temporary().type()->StructSupertype()) {
2269     if (GlobalContext::collect_language_server_data() && pos.has_value()) {
2270       const StructType* type = *reference.temporary().type()->StructSupertype();
2271       const Field& field = type->LookupField(fieldname);
2272       LanguageServerData::AddDefinition(*pos, field.pos);
2273     }
2274     return LocationReference::Temporary(
2275         ProjectStructField(reference.temporary(), fieldname),
2276         reference.temporary_description());
2277   }
2278   if (base::Optional<const Type*> referenced_type =
2279           reference.ReferencedType()) {
2280     if ((*referenced_type)->IsBitFieldStructType()) {
2281       const BitFieldStructType* bitfield_struct =
2282           BitFieldStructType::cast(*referenced_type);
2283       const BitField& field = bitfield_struct->LookupField(fieldname);
2284       return LocationReference::BitFieldAccess(reference, field);
2285     }
2286     if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
2287             (*referenced_type), TypeOracle::GetSmiTaggedGeneric())) {
2288       const BitFieldStructType* bitfield_struct =
2289           BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
2290       if (bitfield_struct == nullptr) {
2291         ReportError(
2292             "When a value of type SmiTagged<T> is used in a field access "
2293             "expression, T is expected to be a bitfield struct type. Instead, "
2294             "T "
2295             "is ",
2296             **type_wrapped_in_smi);
2297       }
2298       const BitField& field = bitfield_struct->LookupField(fieldname);
2299       return LocationReference::BitFieldAccess(reference, field);
2300     }
2301   }
2302   if (reference.IsHeapReference()) {
2303     VisitResult ref = reference.heap_reference();
2304     bool is_const;
2305     auto generic_type =
2306         TypeOracle::MatchReferenceGeneric(ref.type(), &is_const);
2307     if (!generic_type) {
2308       ReportError(
2309           "Left-hand side of field access expression is marked as a reference "
2310           "but is not of type Reference<...>. Found type: ",
2311           ref.type()->ToString());
2312     }
2313     if (auto struct_type = (*generic_type)->StructSupertype()) {
2314       const Field& field = (*struct_type)->LookupField(fieldname);
2315       // Update the Reference's type to refer to the field type within the
2316       // struct.
2317       ref.SetType(TypeOracle::GetReferenceType(
2318           field.name_and_type.type,
2319           is_const ||
2320               (field.const_qualified && !ignore_struct_field_constness)));
2321       if (!field.offset.has_value()) {
2322         Error("accessing field with unknown offset").Throw();
2323       }
2324       if (*field.offset != 0) {
2325         // Copy the Reference struct up the stack and update the new copy's
2326         // |offset| value to point to the struct field.
2327         StackScope scope(this);
2328         ref = GenerateCopy(ref);
2329         VisitResult ref_offset = ProjectStructField(ref, "offset");
2330         VisitResult struct_offset{
2331             TypeOracle::GetIntPtrType()->ConstexprVersion(),
2332             std::to_string(*field.offset)};
2333         VisitResult updated_offset =
2334             GenerateCall("+", Arguments{{ref_offset, struct_offset}, {}});
2335         assembler().Poke(ref_offset.stack_range(), updated_offset.stack_range(),
2336                          ref_offset.type());
2337         ref = scope.Yield(ref);
2338       }
2339       return LocationReference::HeapReference(ref);
2340     }
2341   }
2342   VisitResult object_result = GenerateFetchFromLocation(reference);
2343   if (base::Optional<const ClassType*> class_type =
2344           object_result.type()->ClassSupertype()) {
2345     // This is a hack to distinguish the situation where we want to use
2346     // overloaded field accessors from when we want to create a reference.
2347     bool has_explicit_overloads = TestLookupCallable(
2348         QualifiedName{"." + fieldname}, {object_result.type()});
2349     if ((*class_type)->HasField(fieldname) && !has_explicit_overloads) {
2350       const Field& field = (*class_type)->LookupField(fieldname);
2351       if (GlobalContext::collect_language_server_data() && pos.has_value()) {
2352         LanguageServerData::AddDefinition(*pos, field.pos);
2353       }
2354       if (GlobalContext::collect_kythe_data() && pos.has_value()) {
2355         KytheData::AddClassFieldUse(*pos, &field);
2356       }
2357       return GenerateFieldReference(object_result, field, *class_type);
2358     }
2359   }
2360   return LocationReference::FieldAccess(object_result, fieldname);
2361 }
2362 
2363 LocationReference ImplementationVisitor::GetLocationReference(
2364     ElementAccessExpression* expr) {
2365   LocationReference reference = GetLocationReference(expr->array);
2366   VisitResult index = Visit(expr->index);
2367   if (reference.IsHeapSlice()) {
2368     return GenerateReferenceToItemInHeapSlice(reference, index);
2369   } else {
2370     return LocationReference::ArrayAccess(GenerateFetchFromLocation(reference),
2371                                           index);
2372   }
2373 }
2374 
2375 LocationReference ImplementationVisitor::GenerateReferenceToItemInHeapSlice(
2376     LocationReference slice, VisitResult index) {
2377   DCHECK(slice.IsHeapSlice());
2378   Arguments arguments{{index}, {}};
2379   const StructType* slice_type = *slice.heap_slice().type()->StructSupertype();
2380   Method* method = LookupMethod("AtIndex", slice_type, arguments, {});
2381   // The reference has to be treated like a normal value when calling methods
2382   // on the underlying slice implementation.
2383   LocationReference slice_value =
2384       LocationReference::Temporary(slice.GetVisitResult(), "slice as value");
2385   return LocationReference::HeapReference(
2386       GenerateCall(method, std::move(slice_value), arguments, {}, false));
2387 }
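
// In other words, `slice[i]` desugars to calling the slice struct's AtIndex
// method; the returned Reference struct is re-wrapped as a heap reference so
// that subsequent loads and stores go through it directly.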
2388 
2389 LocationReference ImplementationVisitor::GetLocationReference(
2390     IdentifierExpression* expr) {
2391   if (expr->namespace_qualification.empty()) {
2392     if (base::Optional<Binding<LocalValue>*> value =
2393             TryLookupLocalValue(expr->name->value)) {
2394       if (GlobalContext::collect_language_server_data()) {
2395         LanguageServerData::AddDefinition(expr->name->pos,
2396                                           (*value)->declaration_position());
      }
      if (GlobalContext::collect_kythe_data()) {
        if (!expr->IsThis()) {
          DCHECK_EQ(expr->name->pos.end.column - expr->name->pos.start.column,
                    expr->name->value.length());
          KytheData::AddBindingUse(expr->name->pos, *value);
        }
      }
      if (expr->generic_arguments.size() != 0) {
        ReportError("cannot have generic parameters on local name ",
                    expr->name);
      }
      return (*value)->GetLocationReference(*value);
    }
  }

  if (expr->IsThis()) {
    ReportError("\"this\" cannot be qualified");
  }
  QualifiedName name =
      QualifiedName(expr->namespace_qualification, expr->name->value);
  if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
    if (GlobalContext::collect_language_server_data()) {
      LanguageServerData::AddDefinition(expr->name->pos,
                                        (*builtin)->Position());
    }
    // TODO(v8:12261): Consider collecting KytheData here.
    return LocationReference::Temporary(GetBuiltinCode(*builtin),
                                        "builtin " + expr->name->value);
  }
  if (expr->generic_arguments.size() != 0) {
    GenericCallable* generic = Declarations::LookupUniqueGeneric(name);
    Callable* specialization =
        GetOrCreateSpecialization(SpecializationKey<GenericCallable>{
            generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
    if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
      DCHECK(!builtin->IsExternal());
      return LocationReference::Temporary(GetBuiltinCode(builtin),
                                          "builtin " + expr->name->value);
    } else {
      ReportError("cannot create function pointer for non-builtin ",
                  generic->name());
    }
  }
  Value* value = Declarations::LookupValue(name);
  CHECK(value->Position().source.IsValid());
  if (auto stream = CurrentFileStreams::Get()) {
    stream->required_builtin_includes.insert(value->Position().source);
  }
  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
  }
  if (auto* constant = NamespaceConstant::DynamicCast(value)) {
    if (GlobalContext::collect_kythe_data()) {
      KytheData::AddConstantUse(expr->name->pos, constant);
    }
    if (constant->type()->IsConstexpr()) {
      return LocationReference::Temporary(
          VisitResult(constant->type(), constant->external_name() + "(state_)"),
          "namespace constant " + expr->name->value);
    }
    assembler().Emit(NamespaceConstantInstruction{constant});
    StackRange stack_range =
        assembler().TopRange(LoweredSlotCount(constant->type()));
    return LocationReference::Temporary(
        VisitResult(constant->type(), stack_range),
        "namespace constant " + expr->name->value);
  }
  ExternConstant* constant = ExternConstant::cast(value);
  if (GlobalContext::collect_kythe_data()) {
    KytheData::AddConstantUse(expr->name->pos, constant);
  }
  return LocationReference::Temporary(constant->value(),
                                      "extern value " + expr->name->value);
}

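// Dereferencing produces a heap reference from a reference-typed value. A
// minimal Torque-side sketch (hypothetical names, for illustration only):
//   const ref: &T = ...;
//   const value: T = *ref;  // handled by the overload below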
LocationReference ImplementationVisitor::GetLocationReference(
    DereferenceExpression* expr) {
  VisitResult ref = Visit(expr->reference);
  if (!TypeOracle::MatchReferenceGeneric(ref.type())) {
    Error("Operator * expects a reference type but found a value of type ",
          *ref.type())
        .Throw();
  }
  return LocationReference::HeapReference(ref);
}

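// Fetching from a location depends on the kind of LocationReference:
// temporaries and variables are copied, heap references are loaded (with
// special cases for float64_or_hole and for structs, which are fetched
// field by field), bit fields are extracted from their enclosing bitfield
// struct, and call-based accessors invoke their eval function. Heap slices
// cannot be fetched as a whole.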
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
    const LocationReference& reference) {
  if (reference.IsTemporary()) {
    return GenerateCopy(reference.temporary());
  } else if (reference.IsVariableAccess()) {
    return GenerateCopy(reference.variable());
  } else if (reference.IsHeapReference()) {
    const Type* referenced_type = *reference.ReferencedType();
    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
      return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                        "LoadFloat64OrHole"),
                          Arguments{{reference.heap_reference()}, {}});
    } else if (auto struct_type = referenced_type->StructSupertype()) {
      StackRange result_range = assembler().TopRange(0);
      for (const Field& field : (*struct_type)->fields()) {
        StackScope scope(this);
        const std::string& fieldname = field.name_and_type.name;
        VisitResult field_value = scope.Yield(GenerateFetchFromLocation(
            GenerateFieldAccess(reference, fieldname)));
        result_range.Extend(field_value.stack_range());
      }
      return VisitResult(referenced_type, result_range);
    } else {
      GenerateCopy(reference.heap_reference());
      assembler().Emit(LoadReferenceInstruction{referenced_type});
      DCHECK_EQ(1, LoweredSlotCount(referenced_type));
      return VisitResult(referenced_type, assembler().TopRange(1));
    }
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then get the bits out of it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
                                             reference.bit_field()});
    return VisitResult(*reference.ReferencedType(), assembler().TopRange(1));
  } else {
    if (reference.IsHeapSlice()) {
      ReportError(
          "fetching a value directly from an indexed field isn't allowed");
    }
    DCHECK(reference.IsCallAccess());
    return GenerateCall(reference.eval_function(),
                        Arguments{reference.call_arguments(), {}});
  }
}

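// Assignment mirrors the fetch logic above: call-based accessors invoke their
// assign function, variables are poked in place on the stack, heap references
// are stored (again with float64_or_hole and struct special cases), and bit
// field writes are lowered to a read-modify-write of the enclosing bitfield
// struct. Everything else is not assignable.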
void ImplementationVisitor::GenerateAssignToLocation(
    const LocationReference& reference, const VisitResult& assignment_value) {
  if (reference.IsCallAccess()) {
    Arguments arguments{reference.call_arguments(), {}};
    arguments.parameters.push_back(assignment_value);
    GenerateCall(reference.assign_function(), arguments);
  } else if (reference.IsVariableAccess()) {
    VisitResult variable = reference.variable();
    VisitResult converted_value =
        GenerateImplicitConvert(variable.type(), assignment_value);
    assembler().Poke(variable.stack_range(), converted_value.stack_range(),
                     variable.type());

    // Local variables are detected by the existence of a binding. Assignment
    // to local variables is recorded to support lint errors.
    if (reference.binding()) {
      (*reference.binding())->SetWritten();
    }
  } else if (reference.IsHeapSlice()) {
    ReportError("assigning a value directly to an indexed field isn't allowed");
  } else if (reference.IsHeapReference()) {
    const Type* referenced_type = *reference.ReferencedType();
    if (reference.IsConst()) {
      Error("cannot assign to const value of type ", *referenced_type).Throw();
    }
    if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
      GenerateCall(
          QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                        "StoreFloat64OrHole"),
          Arguments{{reference.heap_reference(), assignment_value}, {}});
    } else if (auto struct_type = referenced_type->StructSupertype()) {
      if (!assignment_value.type()->IsSubtypeOf(referenced_type)) {
        ReportError("Cannot assign to ", *referenced_type,
                    " with value of type ", *assignment_value.type());
      }
      for (const Field& field : (*struct_type)->fields()) {
        const std::string& fieldname = field.name_and_type.name;
        // Allow assignment of structs even if they contain const fields.
        // Const on struct fields just disallows direct writes to them.
        bool ignore_struct_field_constness = true;
        GenerateAssignToLocation(
            GenerateFieldAccess(reference, fieldname,
                                ignore_struct_field_constness),
            ProjectStructField(assignment_value, fieldname));
      }
    } else {
      GenerateCopy(reference.heap_reference());
      VisitResult converted_assignment_value =
          GenerateImplicitConvert(referenced_type, assignment_value);
      if (referenced_type == TypeOracle::GetFloat64Type()) {
        VisitResult silenced_float_value = GenerateCall(
            "Float64SilenceNaN", Arguments{{assignment_value}, {}});
        assembler().Poke(converted_assignment_value.stack_range(),
                         silenced_float_value.stack_range(), referenced_type);
      }
      assembler().Emit(StoreReferenceInstruction{referenced_type});
    }
  } else if (reference.IsBitFieldAccess()) {
    // First fetch the bitfield struct, then set the updated bits, then store
    // it back to where we found it.
    VisitResult bit_field_struct =
        GenerateFetchFromLocation(reference.bit_field_struct_location());
    VisitResult converted_value =
        GenerateImplicitConvert(*reference.ReferencedType(), assignment_value);
    VisitResult updated_bit_field_struct =
        GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
                            bit_field_struct, converted_value);
    GenerateAssignToLocation(reference.bit_field_struct_location(),
                             updated_bit_field_struct);
  } else {
    DCHECK(reference.IsTemporary());
    ReportError("cannot assign to const-bound or temporary ",
                reference.temporary_description());
  }
}

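// Lowers an indirect call through a builtin function pointer. A rough
// Torque-side sketch (hypothetical names, syntax approximate):
//   const f: builtin(Smi) => Smi = MySmiBuiltin;
//   const r: Smi = f(x);  // dispatched through GeneratePointerCall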
VisitResult ImplementationVisitor::GeneratePointerCall(
    Expression* callee, const Arguments& arguments, bool is_tailcall) {
  StackScope scope(this);
  TypeVector parameter_types(arguments.parameters.ComputeTypeVector());
  VisitResult callee_result = Visit(callee);
  if (!callee_result.type()->IsBuiltinPointerType()) {
    std::stringstream stream;
    stream << "Expected a function pointer type but found "
           << *callee_result.type();
    ReportError(stream.str());
  }
  const BuiltinPointerType* type =
      BuiltinPointerType::cast(callee_result.type());

  if (type->parameter_types().size() != parameter_types.size()) {
    std::stringstream stream;
    stream << "parameter count mismatch calling function pointer with type "
           << *type << " - expected "
           << std::to_string(type->parameter_types().size()) << ", found "
           << std::to_string(parameter_types.size());
    ReportError(stream.str());
  }

  ParameterTypes types{type->parameter_types(), false};
  Signature sig;
  sig.parameter_types = types;
  if (!IsCompatibleSignature(sig, parameter_types, 0)) {
    std::stringstream stream;
    stream << "parameters do not match function pointer signature. Expected: ("
           << type->parameter_types() << ") but got: (" << parameter_types
           << ")";
    ReportError(stream.str());
  }

  callee_result = GenerateCopy(callee_result);
  StackRange arg_range = assembler().TopRange(0);
  for (size_t current = 0; current < arguments.parameters.size(); ++current) {
    const Type* to_type = type->parameter_types()[current];
    arg_range.Extend(
        GenerateImplicitConvert(to_type, arguments.parameters[current])
            .stack_range());
  }

  assembler().Emit(
      CallBuiltinPointerInstruction{is_tailcall, type, arg_range.Size()});

  if (is_tailcall) {
    return VisitResult::NeverResult();
  }
  DCHECK_EQ(1, LoweredSlotCount(type->return_type()));
  return scope.Yield(VisitResult(type->return_type(), assembler().TopRange(1)));
}

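// Converts a single argument to its target parameter type and records it
// either on the stack (for runtime values) or as a C++ string (for constexpr
// values). Implicit parameters that arrived as the "top type" placeholder are
// copied through unconverted; they are only legal when the callee is inlined.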
void ImplementationVisitor::AddCallParameter(
    Callable* callable, VisitResult parameter, const Type* parameter_type,
    std::vector<VisitResult>* converted_arguments, StackRange* argument_range,
    std::vector<std::string>* constexpr_arguments, bool inline_macro) {
  VisitResult converted;
  if ((converted_arguments->size() < callable->signature().implicit_count) &&
      parameter.type()->IsTopType()) {
    converted = GenerateCopy(parameter);
  } else {
    converted = GenerateImplicitConvert(parameter_type, parameter);
  }
  converted_arguments->push_back(converted);
  if (!inline_macro) {
    if (converted.IsOnStack()) {
      argument_range->Extend(converted.stack_range());
    } else {
      constexpr_arguments->push_back(converted.constexpr_value());
    }
  }
}

namespace {
std::pair<std::string, std::string> GetClassInstanceTypeRange(
    const ClassType* class_type) {
  std::pair<std::string, std::string> result;
  if (class_type->InstanceTypeRange()) {
    auto instance_type_range = *class_type->InstanceTypeRange();
    std::string instance_type_string_first =
        "static_cast<InstanceType>(" +
        std::to_string(instance_type_range.first) + ")";
    std::string instance_type_string_second =
        "static_cast<InstanceType>(" +
        std::to_string(instance_type_range.second) + ")";
    result =
        std::make_pair(instance_type_string_first, instance_type_string_second);
  } else {
    ReportError(
        "%Min/MaxInstanceType must take a class type that either is a string "
        "or has a generated instance type range");
  }
  return result;
}
}  // namespace

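// Central lowering for all calls. After checking tail-call constraints and
// collecting implicit, explicit, and "this" arguments, the callable is
// dispatched on its kind: builtins and runtime functions become call
// instructions, macros are inlined, emitted as plain calls, or emitted as
// branching calls when "otherwise" labels are involved, and intrinsics are
// expanded inline by name.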
VisitResult ImplementationVisitor::GenerateCall(
    Callable* callable, base::Optional<LocationReference> this_reference,
    Arguments arguments, const TypeVector& specialization_types,
    bool is_tailcall) {
  CHECK(callable->Position().source.IsValid());
  if (auto stream = CurrentFileStreams::Get()) {
    stream->required_builtin_includes.insert(callable->Position().source);
  }

  const Type* return_type = callable->signature().return_type;

  if (is_tailcall) {
    if (Builtin* builtin = Builtin::DynamicCast(CurrentCallable::Get())) {
      const Type* outer_return_type = builtin->signature().return_type;
      if (!return_type->IsSubtypeOf(outer_return_type)) {
        Error("Cannot tailcall, type of result is ", *return_type,
              " but should be a subtype of ", *outer_return_type, ".");
      }
    } else {
      Error("Tail calls are only allowed from builtins");
    }
  }

  bool inline_macro = callable->ShouldBeInlined(output_type_);
  std::vector<VisitResult> implicit_arguments;
  for (size_t i = 0; i < callable->signature().implicit_count; ++i) {
    std::string implicit_name = callable->signature().parameter_names[i]->value;
    base::Optional<Binding<LocalValue>*> val =
        TryLookupLocalValue(implicit_name);
    if (val) {
      implicit_arguments.push_back(
          GenerateFetchFromLocation((*val)->GetLocationReference(*val)));
    } else {
      VisitResult uninitialized = VisitResult::TopTypeResult(
          "implicit parameter '" + implicit_name +
              "' is not defined when invoking " + callable->ReadableName() +
              " at " + PositionAsString(CurrentSourcePosition::Get()),
          callable->signature().parameter_types.types[i]);
      implicit_arguments.push_back(uninitialized);
    }
    const Type* type = implicit_arguments.back().type();
    if (const TopType* top_type = TopType::DynamicCast(type)) {
      if (!callable->IsMacro() || callable->IsExternal()) {
        ReportError(
            "uninitialized implicit parameters can only be passed to "
            "Torque-defined macros: the ",
            top_type->reason());
      }
      inline_macro = true;
    }
  }

  std::vector<VisitResult> converted_arguments;
  StackRange argument_range = assembler().TopRange(0);
  std::vector<std::string> constexpr_arguments;

  size_t current = 0;
  for (; current < callable->signature().implicit_count; ++current) {
    AddCallParameter(callable, implicit_arguments[current],
                     callable->signature().parameter_types.types[current],
                     &converted_arguments, &argument_range,
                     &constexpr_arguments, inline_macro);
  }

  if (this_reference) {
    DCHECK(callable->IsMethod());
    Method* method = Method::cast(callable);
    // By now, the this reference should be a variable, a temporary, or a
    // slice. In any case, fetching the VisitResult should succeed.
    VisitResult this_value = this_reference->GetVisitResult();
    if (inline_macro) {
      if (!this_value.type()->IsSubtypeOf(method->aggregate_type())) {
        ReportError("this parameter must be a subtype of ",
                    *method->aggregate_type(), " but it is of type ",
                    *this_value.type());
      }
    } else {
      AddCallParameter(callable, this_value, method->aggregate_type(),
                       &converted_arguments, &argument_range,
                       &constexpr_arguments, inline_macro);
    }
    ++current;
  }

  for (auto arg : arguments.parameters) {
    const Type* to_type = (current >= callable->signature().types().size())
                              ? TypeOracle::GetObjectType()
                              : callable->signature().types()[current++];
    AddCallParameter(callable, arg, to_type, &converted_arguments,
                     &argument_range, &constexpr_arguments, inline_macro);
  }

  size_t label_count = callable->signature().labels.size();
  if (label_count != arguments.labels.size()) {
    std::stringstream s;
    s << "unexpected number of otherwise labels for "
      << callable->ReadableName() << " (expected "
      << std::to_string(label_count) << " found "
      << std::to_string(arguments.labels.size()) << ")";
    ReportError(s.str());
  }

  if (callable->IsTransitioning()) {
    if (!CurrentCallable::Get()->IsTransitioning()) {
      std::stringstream s;
      s << *CurrentCallable::Get()
        << " isn't marked transitioning but calls the transitioning "
        << *callable;
      ReportError(s.str());
    }
  }

  if (auto* builtin = Builtin::DynamicCast(callable)) {
    base::Optional<Block*> catch_block = GetCatchBlock();
    assembler().Emit(CallBuiltinInstruction{
        is_tailcall, builtin, argument_range.Size(), catch_block});
    GenerateCatchBlock(catch_block);
    if (is_tailcall) {
      return VisitResult::NeverResult();
    } else {
      size_t slot_count = LoweredSlotCount(return_type);
      if (builtin->IsStub()) {
        if (slot_count < 1 || slot_count > 2) {
          ReportError(
              "Builtin with stub linkage is expected to return one or two "
              "values but returns ",
              slot_count);
        }
      } else {
        if (slot_count != 1) {
          ReportError(
              "Builtin with JS linkage is expected to return one value but "
              "returns ",
              slot_count);
        }
      }
      return VisitResult(return_type, assembler().TopRange(slot_count));
    }
  } else if (auto* macro = Macro::DynamicCast(callable)) {
    if (is_tailcall) {
      ReportError("can't tail call a macro");
    }

    macro->SetUsed();

    // If we're currently generating a C++ macro and it's calling another macro,
    // then we need to make sure that we also generate C++ code for the called
    // macro within the same -inl.inc file.
    if ((output_type_ == OutputType::kCC ||
         output_type_ == OutputType::kCCDebug) &&
        !inline_macro) {
      if (auto* torque_macro = TorqueMacro::DynamicCast(macro)) {
        auto* streams = CurrentFileStreams::Get();
        SourceId file = streams ? streams->file : SourceId::Invalid();
        GlobalContext::EnsureInCCOutputList(torque_macro, file);
      }
    }

    // TODO(torque-builder): Consider a function builder here.
    if (return_type->IsConstexpr()) {
      DCHECK_EQ(0, arguments.labels.size());
      std::stringstream result;
      result << "(";
      bool first = true;
      switch (output_type_) {
        case OutputType::kCSA: {
          if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
            result << extern_macro->external_assembler_name() << "(state_)."
                   << extern_macro->ExternalName() << "(";
          } else {
            result << macro->ExternalName() << "(state_";
            first = false;
          }
          break;
        }
        case OutputType::kCC: {
          auto* extern_macro = ExternMacro::DynamicCast(macro);
          CHECK_NOT_NULL(extern_macro);
          result << extern_macro->CCName() << "(";
          break;
        }
        case OutputType::kCCDebug: {
          auto* extern_macro = ExternMacro::DynamicCast(macro);
          CHECK_NOT_NULL(extern_macro);
          result << extern_macro->CCDebugName() << "(accessor";
          first = false;
          break;
        }
      }
      for (VisitResult arg : converted_arguments) {
        DCHECK(!arg.IsOnStack());
        if (!first) {
          result << ", ";
        }
        first = false;
        result << arg.constexpr_value();
      }
      result << "))";
      return VisitResult(return_type, result.str());
    } else if (inline_macro) {
      std::vector<Block*> label_blocks;
      for (Binding<LocalLabel>* label : arguments.labels) {
        label_blocks.push_back(label->block);
      }
      return InlineMacro(macro, this_reference, converted_arguments,
                         label_blocks);
    } else if (arguments.labels.empty() &&
               return_type != TypeOracle::GetNeverType()) {
      base::Optional<Block*> catch_block = GetCatchBlock();
      assembler().Emit(
          CallCsaMacroInstruction{macro, constexpr_arguments, catch_block});
      GenerateCatchBlock(catch_block);
      size_t return_slot_count = LoweredSlotCount(return_type);
      return VisitResult(return_type, assembler().TopRange(return_slot_count));
    } else {
      base::Optional<Block*> return_continuation;
      if (return_type != TypeOracle::GetNeverType()) {
        return_continuation = assembler().NewBlock();
      }

      std::vector<Block*> label_blocks;

      for (size_t i = 0; i < label_count; ++i) {
        label_blocks.push_back(assembler().NewBlock());
      }
      base::Optional<Block*> catch_block = GetCatchBlock();
      assembler().Emit(CallCsaMacroAndBranchInstruction{
          macro, constexpr_arguments, return_continuation, label_blocks,
          catch_block});
      GenerateCatchBlock(catch_block);

      for (size_t i = 0; i < label_count; ++i) {
        Binding<LocalLabel>* label = arguments.labels[i];
        size_t callee_label_parameters =
            callable->signature().labels[i].types.size();
        if (label->parameter_types.size() != callee_label_parameters) {
          std::stringstream s;
          s << "label " << label->name()
            << " doesn't have the right number of parameters (found "
            << std::to_string(label->parameter_types.size()) << " expected "
            << std::to_string(callee_label_parameters) << ")";
          ReportError(s.str());
        }
        assembler().Bind(label_blocks[i]);
        assembler().Goto(
            label->block,
            LowerParameterTypes(callable->signature().labels[i].types).size());

        size_t j = 0;
        for (auto t : callable->signature().labels[i].types) {
          const Type* parameter_type = label->parameter_types[j];
          if (!t->IsSubtypeOf(parameter_type)) {
            ReportError("mismatch of label parameters (label expects ",
                        *parameter_type, " but macro produces ", *t,
                        " for parameter ", i + 1, ")");
          }
          j++;
        }
      }

      if (return_continuation) {
        assembler().Bind(*return_continuation);
        size_t return_slot_count = LoweredSlotCount(return_type);
        return VisitResult(return_type,
                           assembler().TopRange(return_slot_count));
      } else {
        return VisitResult::NeverResult();
      }
    }
  } else if (auto* runtime_function = RuntimeFunction::DynamicCast(callable)) {
    base::Optional<Block*> catch_block = GetCatchBlock();
    assembler().Emit(CallRuntimeInstruction{
        is_tailcall, runtime_function, argument_range.Size(), catch_block});
    GenerateCatchBlock(catch_block);
    if (is_tailcall || return_type == TypeOracle::GetNeverType()) {
      return VisitResult::NeverResult();
    } else {
      size_t slot_count = LoweredSlotCount(return_type);
      DCHECK_LE(slot_count, 1);
      // TODO(turbofan): Actually, runtime functions have to return a value, so
      // we should assert slot_count == 1 here.
      return VisitResult(return_type, assembler().TopRange(slot_count));
    }
  } else if (auto* intrinsic = Intrinsic::DynamicCast(callable)) {
    if (intrinsic->ExternalName() == "%SizeOf") {
      if (specialization_types.size() != 1) {
        ReportError("%SizeOf must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      std::string size_string;
      if (base::Optional<std::tuple<size_t, std::string>> size = SizeOf(type)) {
        size_string = std::get<1>(*size);
      } else {
        Error("size of ", *type, " is not known.");
      }
      return VisitResult(return_type, size_string);
    } else if (intrinsic->ExternalName() == "%ClassHasMapConstant") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%ClassHasMapConstant must take a class type parameter");
      }
      // If the class isn't actually used as the parameter to a TNode,
      // then we can't rely on the class existing in C++ or being of the same
      // type (e.g. it could be a template), so don't use the template CSA
      // machinery for accessing the class' map.
      if (class_type->name() != class_type->GetGeneratedTNodeTypeName()) {
        return VisitResult(return_type, std::string("false"));
      } else {
        return VisitResult(
            return_type,
            std::string("CodeStubAssembler(state_).ClassHasMapConstant<") +
                class_type->name() + ">()");
      }
    } else if (intrinsic->ExternalName() == "%MinInstanceType") {
      if (specialization_types.size() != 1) {
        ReportError("%MinInstanceType must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%MinInstanceType must take a class type parameter");
      }
      std::pair<std::string, std::string> instance_types =
          GetClassInstanceTypeRange(class_type);
      return VisitResult(return_type, instance_types.first);
    } else if (intrinsic->ExternalName() == "%MaxInstanceType") {
      if (specialization_types.size() != 1) {
        ReportError("%MaxInstanceType must take a single type parameter");
      }
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%MaxInstanceType must take a class type parameter");
      }
      std::pair<std::string, std::string> instance_types =
          GetClassInstanceTypeRange(class_type);
      return VisitResult(return_type, instance_types.second);
    } else if (intrinsic->ExternalName() == "%RawConstexprCast") {
      if (intrinsic->signature().parameter_types.types.size() != 1 ||
          constexpr_arguments.size() != 1) {
        ReportError(
            "%RawConstexprCast must take a single parameter with constexpr "
            "type");
      }
      if (!return_type->IsConstexpr()) {
        std::stringstream s;
        s << *return_type
          << " return type for %RawConstexprCast is not constexpr";
        ReportError(s.str());
      }
      std::stringstream result;
      result << "static_cast<" << return_type->GetGeneratedTypeName() << ">(";
      result << constexpr_arguments[0];
      result << ")";
      return VisitResult(return_type, result.str());
    } else if (intrinsic->ExternalName() == "%IndexedFieldLength") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("%IndexedFieldLength must take a class type parameter");
      }
      const Field& field =
          class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
      return GenerateArrayLength(VisitResult(type, argument_range), field);
    } else if (intrinsic->ExternalName() == "%MakeLazy") {
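      // %MakeLazy defers a macro call until the resulting Lazy value is
      // forced. A rough Torque-side sketch (hypothetical names, for
      // illustration only):
      //   const lazy: Lazy<Smi> = %MakeLazy<Smi>('ComputeValue', arg);
      // where 'ComputeValue' names a label-free, non-method macro that takes
      // the remaining arguments.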
      if (specialization_types[0]->IsStructType()) {
        ReportError("%MakeLazy can't use macros that return structs");
      }
      std::string getter_name = StringLiteralUnquote(constexpr_arguments[0]);

      // Normally the parser would split namespace names for us, but we
      // sidestepped it by putting the macro name in a string literal.
      QualifiedName qualified_getter_name = QualifiedName::Parse(getter_name);

      // converted_arguments contains all of the arguments to %MakeLazy. We're
      // looking for a function that takes all but the first.
      Arguments arguments_to_getter;
      arguments_to_getter.parameters.insert(
          arguments_to_getter.parameters.begin(),
          converted_arguments.begin() + 1, converted_arguments.end());

      Callable* callable_macro = LookupCallable(
          qualified_getter_name, Declarations::Lookup(qualified_getter_name),
          arguments_to_getter, {});
      Macro* getter = Macro::DynamicCast(callable_macro);
      if (!getter || getter->IsMethod()) {
        ReportError(
            "%MakeLazy expects a macro, not builtin or other type of callable");
      }
      if (!getter->signature().labels.empty()) {
        ReportError("%MakeLazy requires a macro with no labels");
      }
      if (!getter->signature().return_type->IsSubtypeOf(
              specialization_types[0])) {
        ReportError("%MakeLazy expected return type ", *specialization_types[0],
                    " but found ", *getter->signature().return_type);
      }
      if (getter->signature().implicit_count > 0) {
        ReportError("Implicit parameters are not yet supported in %MakeLazy");
      }

      getter->SetUsed();  // Prevent warnings about unused macros.

      // Now that we've looked up the getter macro, we have to convert the
      // arguments again, so that, for example, constexpr arguments can be
      // coerced to non-constexpr types and put on the stack.

      std::vector<VisitResult> converted_arguments_for_getter;
      StackRange argument_range_for_getter = assembler().TopRange(0);
      std::vector<std::string> constexpr_arguments_for_getter;

      size_t arg_count = 0;
      for (auto arg : arguments_to_getter.parameters) {
        DCHECK_LT(arg_count, getter->signature().types().size());
        const Type* to_type = getter->signature().types()[arg_count++];
        AddCallParameter(getter, arg, to_type, &converted_arguments_for_getter,
                         &argument_range_for_getter,
                         &constexpr_arguments_for_getter,
                         /*inline_macro=*/false);
      }

      // Now that the arguments are prepared, emit the instruction that consumes
      // them.
      assembler().Emit(MakeLazyNodeInstruction{getter, return_type,
                                               constexpr_arguments_for_getter});
      return VisitResult(return_type, assembler().TopRange(1));
    } else if (intrinsic->ExternalName() == "%FieldSlice") {
      const Type* type = specialization_types[0];
      const ClassType* class_type = ClassType::DynamicCast(type);
      if (!class_type) {
        ReportError("The first type parameter to %FieldSlice must be a class");
      }
      const Field& field =
          class_type->LookupField(StringLiteralUnquote(constexpr_arguments[0]));
      const Type* expected_slice_type =
          field.const_qualified
              ? TypeOracle::GetConstSliceType(field.name_and_type.type)
              : TypeOracle::GetMutableSliceType(field.name_and_type.type);
      const Type* declared_slice_type = specialization_types[1];
      if (expected_slice_type != declared_slice_type) {
        Error(
            "The second type parameter to %FieldSlice must be the precise "
            "slice type for the named field");
      }
      LocationReference ref = GenerateFieldReference(
          VisitResult(type, argument_range), field, class_type,
          /*treat_optional_as_indexed=*/true);
      if (!ref.IsHeapSlice()) {
        ReportError("%FieldSlice expected an indexed or optional field");
      }
      return ref.heap_slice();
    } else {
      assembler().Emit(CallIntrinsicInstruction{intrinsic, specialization_types,
                                                constexpr_arguments});
      size_t return_slot_count =
          LoweredSlotCount(intrinsic->signature().return_type);
      return VisitResult(return_type, assembler().TopRange(return_slot_count));
    }
  } else {
    UNREACHABLE();
  }
}

VisitResult ImplementationVisitor::GenerateCall(
    const QualifiedName& callable_name, Arguments arguments,
    const TypeVector& specialization_types, bool is_tailcall) {
  Callable* callable =
      LookupCallable(callable_name, Declarations::Lookup(callable_name),
                     arguments, specialization_types);
  return GenerateCall(callable, base::nullopt, arguments, specialization_types,
                      is_tailcall);
}

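// Handles syntactic calls. The names "&", "!", and "==" get special treatment
// here: "&" turns a location into an explicit heap reference or slice, while
// "!" and "==" propagate bitfield marks for lint checking. A call to a local
// name without namespace qualification or template arguments is a function
// pointer call; everything else goes through regular callable lookup.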
VisitResult ImplementationVisitor::Visit(CallExpression* expr,
                                         bool is_tailcall) {
  StackScope scope(this);

  if (expr->callee->name->value == "&" && expr->arguments.size() == 1) {
    if (auto* loc_expr = LocationExpression::DynamicCast(expr->arguments[0])) {
      LocationReference ref = GetLocationReference(loc_expr);
      if (ref.IsHeapReference()) return scope.Yield(ref.heap_reference());
      if (ref.IsHeapSlice()) return scope.Yield(ref.heap_slice());
    }
    ReportError("Unable to create a heap reference.");
  }

  Arguments arguments;
  QualifiedName name = QualifiedName(expr->callee->namespace_qualification,
                                     expr->callee->name->value);
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->callee->generic_arguments);
  bool has_template_arguments = !specialization_types.empty();
  for (Expression* arg : expr->arguments)
    arguments.parameters.push_back(Visit(arg));
  arguments.labels = LabelsFromIdentifiers(expr->labels);
  if (!has_template_arguments && name.namespace_qualification.empty() &&
      TryLookupLocalValue(name.name)) {
    return scope.Yield(
        GeneratePointerCall(expr->callee, arguments, is_tailcall));
  } else {
    if (GlobalContext::collect_language_server_data()) {
      Callable* callable = LookupCallable(name, Declarations::Lookup(name),
                                          arguments, specialization_types);
      LanguageServerData::AddDefinition(expr->callee->name->pos,
                                        callable->IdentifierPosition());
    }
    if (GlobalContext::collect_kythe_data()) {
      Callable* callable = LookupCallable(name, Declarations::Lookup(name),
                                          arguments, specialization_types);
      Callable* caller = CurrentCallable::Get();
      KytheData::AddCall(caller, expr->callee->name->pos, callable);
    }
    if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
      PropagateBitfieldMark(expr->arguments[0], expr);
    }
    if (expr->callee->name->value == "==" && arguments.parameters.size() == 2) {
      if (arguments.parameters[0].type()->IsConstexpr()) {
        PropagateBitfieldMark(expr->arguments[1], expr);
      } else if (arguments.parameters[1].type()->IsConstexpr()) {
        PropagateBitfieldMark(expr->arguments[0], expr);
      }
    }
    return scope.Yield(
        GenerateCall(name, arguments, specialization_types, is_tailcall));
  }
}

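// Method calls are resolved against the aggregate (struct or class) type of
// the receiver. A receiver that is not a variable is first fetched into a
// temporary so that it can be passed as the "this" argument.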
VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
  StackScope scope(this);
  Arguments arguments;
  std::string method_name = expr->method->name->value;
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->method->generic_arguments);
  LocationReference target = GetLocationReference(expr->target);
  if (!target.IsVariableAccess()) {
    VisitResult result = GenerateFetchFromLocation(target);
    target = LocationReference::Temporary(result, "this parameter");
  }
  const AggregateType* target_type =
      (*target.ReferencedType())->AggregateSupertype().value_or(nullptr);
  if (!target_type) {
    ReportError("target of method call not a struct or class type");
  }
  for (Expression* arg : expr->arguments) {
    arguments.parameters.push_back(Visit(arg));
  }
  arguments.labels = LabelsFromIdentifiers(expr->labels);
  TypeVector argument_types = arguments.parameters.ComputeTypeVector();
  DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
  QualifiedName qualified_name = QualifiedName(method_name);
  Callable* callable = LookupMethod(method_name, target_type, arguments, {});
  if (GlobalContext::collect_language_server_data()) {
    LanguageServerData::AddDefinition(expr->method->name->pos,
                                      callable->IdentifierPosition());
  }
  if (GlobalContext::collect_kythe_data()) {
    Callable* caller = CurrentCallable::Get();
    KytheData::AddCall(caller, expr->method->name->pos, callable);
  }
  return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}

VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
  StackScope scope(this);
  Arguments arguments;
  TypeVector specialization_types =
      TypeVisitor::ComputeTypeVector(expr->generic_arguments);
  for (Expression* arg : expr->arguments)
    arguments.parameters.push_back(Visit(arg));
  return scope.Yield(
      GenerateCall(expr->name->value, arguments, specialization_types, false));
}

void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
                                           Block* true_block,
                                           Block* false_block) {
  DCHECK_EQ(condition,
            VisitResult(TypeOracle::GetBoolType(), assembler().TopRange(1)));
  assembler().Branch(true_block, false_block);
}

VisitResult ImplementationVisitor::GenerateBoolConstant(bool constant) {
  return GenerateImplicitConvert(TypeOracle::GetBoolType(),
                                 VisitResult(TypeOracle::GetConstexprBoolType(),
                                             constant ? "true" : "false"));
}

void ImplementationVisitor::GenerateExpressionBranch(Expression* expression,
                                                     Block* true_block,
                                                     Block* false_block) {
  StackScope stack_scope(this);
  VisitResult expression_result = this->Visit(expression);
  expression_result = stack_scope.Yield(
      GenerateImplicitConvert(TypeOracle::GetBoolType(), expression_result));
  GenerateBranch(expression_result, true_block, false_block);
}

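// Implicit conversion has three cases: identical types are copied, a
// registered implicit conversion is emitted as a FromConstexpr-style call,
// and assignable types are retyped in place. A rough Torque-side sketch
// (for illustration only):
//   const s: Smi = 1;  // a constexpr literal implicitly converted to Smi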
VisitResult ImplementationVisitor::GenerateImplicitConvert(
    const Type* destination_type, VisitResult source) {
  StackScope scope(this);
  if (source.type() == TypeOracle::GetNeverType()) {
    ReportError("it is not allowed to use a value of type never");
  }

  if (destination_type == source.type()) {
    return scope.Yield(GenerateCopy(source));
  }

  if (auto from = TypeOracle::ImplicitlyConvertableFrom(destination_type,
                                                        source.type())) {
    return scope.Yield(GenerateCall(kFromConstexprMacroName,
                                    Arguments{{source}, {}},
                                    {destination_type, *from}, false));
  } else if (IsAssignableFrom(destination_type, source.type())) {
    source.SetType(destination_type);
    return scope.Yield(GenerateCopy(source));
  } else {
    std::stringstream s;
    if (const TopType* top_type = TopType::DynamicCast(source.type())) {
      s << "undefined expression of type " << *destination_type << ": the "
        << top_type->reason();
    } else {
      s << "cannot use expression of type " << *source.type()
        << " as a value of type " << *destination_type;
    }
    ReportError(s.str());
  }
}

StackRange ImplementationVisitor::GenerateLabelGoto(
    LocalLabel* label, base::Optional<StackRange> arguments) {
  return assembler().Goto(label->block, arguments ? arguments->Size() : 0);
}

std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
    const std::vector<Identifier*>& names) {
  std::vector<Binding<LocalLabel>*> result;
  result.reserve(names.size());
  for (const auto& name : names) {
    Binding<LocalLabel>* label = LookupLabel(name->value);
    result.push_back(label);

    // Link up labels in the "otherwise" part of the call expression with
    // either the label in the signature of the calling macro or the label
    // block of a surrounding "try".
    if (GlobalContext::collect_language_server_data()) {
      LanguageServerData::AddDefinition(name->pos,
                                        label->declaration_position());
    }
    // TODO(v8:12261): Might have to track KytheData here.
  }
  return result;
}

StackRange ImplementationVisitor::LowerParameter(
    const Type* type, const std::string& parameter_name,
    Stack<std::string>* lowered_parameters) {
  if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
    StackRange range = lowered_parameters->TopRange(0);
    for (auto& field : (*struct_type)->fields()) {
      StackRange parameter_range = LowerParameter(
          field.name_and_type.type,
          parameter_name + "." + field.name_and_type.name, lowered_parameters);
      range.Extend(parameter_range);
    }
    return range;
  } else {
    lowered_parameters->Push(parameter_name);
    return lowered_parameters->TopRange(1);
  }
}

void ImplementationVisitor::LowerLabelParameter(
    const Type* type, const std::string& parameter_name,
    std::vector<std::string>* lowered_parameters) {
  if (base::Optional<const StructType*> struct_type = type->StructSupertype()) {
    for (auto& field : (*struct_type)->fields()) {
      LowerLabelParameter(
          field.name_and_type.type,
          "&((*" + parameter_name + ")." + field.name_and_type.name + ")",
          lowered_parameters);
    }
  } else {
    lowered_parameters->push_back(parameter_name);
  }
}

std::string ImplementationVisitor::ExternalLabelName(
    const std::string& label_name) {
  return "label_" + label_name;
}

std::string ImplementationVisitor::ExternalLabelParameterName(
    const std::string& label_name, size_t i) {
  return "label_" + label_name + "_parameter_" + std::to_string(i);
}

std::string ImplementationVisitor::ExternalParameterName(
    const std::string& name) {
  return std::string("p_") + name;
}

DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentFileStreams)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)

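// A signature is compatible when every explicit argument type is assignable
// to the corresponding parameter (extra arguments are matched against varargs
// as Object) and the label counts agree; implicit parameters are skipped.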
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
                           size_t label_count) {
  auto i = sig.parameter_types.types.begin() + sig.implicit_count;
  if ((sig.parameter_types.types.size() - sig.implicit_count) > types.size())
    return false;
  if (sig.labels.size() != label_count) return false;
  for (auto current : types) {
    if (i == sig.parameter_types.types.end()) {
      if (!sig.parameter_types.var_args) return false;
      if (!IsAssignableFrom(TypeOracle::GetObjectType(), current)) return false;
    } else {
      if (!IsAssignableFrom(*i++, current)) return false;
    }
  }
  return true;
}

base::Optional<Block*> ImplementationVisitor::GetCatchBlock() {
  base::Optional<Block*> catch_block;
  if (base::Optional<Binding<LocalLabel>*> catch_handler =
          TryLookupLabel(kCatchLabelName)) {
    catch_block = assembler().NewBlock(base::nullopt, true);
  }
  return catch_block;
}

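// When a call site sits inside a Torque try/catch, the emitted call gets a
// catch block that fetches the pending message and jumps to the handler with
// two values. A rough Torque-side sketch (hypothetical names, for
// illustration only):
//   try {
//     ThrowingCall();
//   } catch (exception, message) {
//     ...
//   }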
void ImplementationVisitor::GenerateCatchBlock(
    base::Optional<Block*> catch_block) {
  if (catch_block) {
    base::Optional<Binding<LocalLabel>*> catch_handler =
        TryLookupLabel(kCatchLabelName);
    // Reset the local scopes to prevent the macro calls below from using the
    // current catch handler.
    BindingsManagersScope bindings_managers_scope;
    if (assembler().CurrentBlockIsComplete()) {
      assembler().Bind(*catch_block);
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "GetAndResetPendingMessage"),
                   Arguments{{}, {}}, {}, false);
      assembler().Goto((*catch_handler)->block, 2);
    } else {
      CfgAssemblerScopedTemporaryBlock temp(&assembler(), *catch_block);
      GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
                                 "GetAndResetPendingMessage"),
                   Arguments{{}, {}}, {}, false);
      assembler().Goto((*catch_handler)->block, 2);
    }
  }
}

void ImplementationVisitor::VisitAllDeclarables() {
  CurrentCallable::Scope current_callable(nullptr);
  const std::vector<std::unique_ptr<Declarable>>& all_declarables =
      GlobalContext::AllDeclarables();

  // This has to be an index-based loop because all_declarables can be extended
  // during the loop.
  for (size_t i = 0; i < all_declarables.size(); ++i) {
    try {
      Visit(all_declarables[i].get());
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }

  // Do the same for macros which generate C++ code.
  output_type_ = OutputType::kCC;
  const std::vector<std::pair<TorqueMacro*, SourceId>>& cc_macros =
      GlobalContext::AllMacrosForCCOutput();
  for (size_t i = 0; i < cc_macros.size(); ++i) {
    try {
      Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }

  // Do the same for macros which generate C++ debug code.
  // The set of macros is the same as C++ macros.
  output_type_ = OutputType::kCCDebug;
  for (size_t i = 0; i < cc_macros.size(); ++i) {
    try {
      Visit(static_cast<Declarable*>(cc_macros[i].first), cc_macros[i].second);
    } catch (TorqueAbortCompilation&) {
      // Recover from compile errors here. The error is recorded already.
    }
  }
  output_type_ = OutputType::kCSA;
}

void ImplementationVisitor::Visit(Declarable* declarable,
                                  base::Optional<SourceId> file) {
  CurrentScope::Scope current_scope(declarable->ParentScope());
  CurrentSourcePosition::Scope current_source_position(declarable->Position());
  CurrentFileStreams::Scope current_file_streams(
      &GlobalContext::GeneratedPerFile(file ? *file
                                            : declarable->Position().source));
  if (Callable* callable = Callable::DynamicCast(declarable)) {
    if (!callable->ShouldGenerateExternalCode(output_type_))
      CurrentFileStreams::Get() = nullptr;
  }
  switch (declarable->kind()) {
    case Declarable::kExternMacro:
      return Visit(ExternMacro::cast(declarable));
    case Declarable::kTorqueMacro:
      return Visit(TorqueMacro::cast(declarable));
    case Declarable::kMethod:
      return Visit(Method::cast(declarable));
    case Declarable::kBuiltin:
      return Visit(Builtin::cast(declarable));
    case Declarable::kTypeAlias:
      return Visit(TypeAlias::cast(declarable));
    case Declarable::kNamespaceConstant:
      return Visit(NamespaceConstant::cast(declarable));
    case Declarable::kRuntimeFunction:
    case Declarable::kIntrinsic:
    case Declarable::kExternConstant:
    case Declarable::kNamespace:
    case Declarable::kGenericCallable:
    case Declarable::kGenericType:
      return;
  }
}

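// Maps a Torque type onto the MachineType expression used in generated
// interface descriptors: Smi-like types become TaggedSigned, heap objects
// TaggedPointer, other tagged types AnyTagged, and everything else defers to
// MachineTypeOf on the generated TNode type.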
std::string MachineTypeString(const Type* type) {
  if (type->IsSubtypeOf(TypeOracle::GetSmiType())) {
    return "MachineType::TaggedSigned()";
  }
  if (type->IsSubtypeOf(TypeOracle::GetHeapObjectType())) {
    return "MachineType::TaggedPointer()";
  }
  if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    return "MachineType::AnyTagged()";
  }
  return "MachineTypeOf<" + type->GetGeneratedTNodeTypeName() + ">::value";
}

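// Emits two generated files: builtin-definitions.h, which contributes TFC/TFJ
// entries to the builtin list macro, and interface-descriptors.inc, which
// declares one call interface descriptor per stub builtin. A rough sketch of
// the output for a hypothetical stub builtin Foo (for illustration only):
//   TFC(Foo, Foo) \
//   class FooDescriptor
//       : public StaticCallInterfaceDescriptor<FooDescriptor> { ... };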
GenerateBuiltinDefinitionsAndInterfaceDescriptors(const std::string & output_directory)3548 void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
3549     const std::string& output_directory) {
3550   std::stringstream builtin_definitions;
3551   std::string builtin_definitions_file_name = "builtin-definitions.h";
3552 
3553   // This file contains plain interface descriptor definitions and has to be
3554   // included in the middle of interface-descriptors.h. Thus it is not a normal
3555   // header file and uses the .inc suffix instead of the .h suffix.
3556   std::stringstream interface_descriptors;
3557   std::string interface_descriptors_file_name = "interface-descriptors.inc";
3558   {
3559     IncludeGuardScope builtin_definitions_include_guard(
3560         builtin_definitions, builtin_definitions_file_name);
3561 
3562     builtin_definitions
3563         << "\n"
3564            "#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, "
3565            "ASM) "
3566            "\\\n";
3567     for (auto& declarable : GlobalContext::AllDeclarables()) {
3568       Builtin* builtin = Builtin::DynamicCast(declarable.get());
3569       if (!builtin || builtin->IsExternal()) continue;
3570       if (builtin->IsStub()) {
3571         builtin_definitions << "TFC(" << builtin->ExternalName() << ", "
3572                             << builtin->ExternalName();
3573         std::string descriptor_name = builtin->ExternalName() + "Descriptor";
3574         bool has_context_parameter = builtin->signature().HasContextParameter();
3575         size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
3576         TypeVector return_types = LowerType(builtin->signature().return_type);
3577 
3578         interface_descriptors << "class " << descriptor_name
3579                               << " : public StaticCallInterfaceDescriptor<"
3580                               << descriptor_name << "> {\n";
3581 
3582         interface_descriptors << " public:\n";
3583 
3584         if (has_context_parameter) {
3585           interface_descriptors << "  DEFINE_RESULT_AND_PARAMETERS(";
3586         } else {
3587           interface_descriptors << "  DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
3588         }
3589         interface_descriptors << return_types.size();
3590         for (size_t i = kFirstNonContextParameter;
3591              i < builtin->parameter_names().size(); ++i) {
3592           Identifier* parameter = builtin->parameter_names()[i];
3593           interface_descriptors << ", k" << CamelifyString(parameter->value);
3594         }
3595         interface_descriptors << ")\n";
3596 
3597         interface_descriptors << "  DEFINE_RESULT_AND_PARAMETER_TYPES(";
3598         PrintCommaSeparatedList(interface_descriptors, return_types,
3599                                 MachineTypeString);
3600         for (size_t i = kFirstNonContextParameter;
3601              i < builtin->parameter_names().size(); ++i) {
3602           const Type* type = builtin->signature().parameter_types.types[i];
3603           interface_descriptors << ", " << MachineTypeString(type);
3604         }
3605         interface_descriptors << ")\n";
3606 
3607         interface_descriptors << "  DECLARE_DEFAULT_DESCRIPTOR("
3608                               << descriptor_name << ")\n";
3609         interface_descriptors << "};\n\n";
3610       } else {
3611         builtin_definitions << "TFJ(" << builtin->ExternalName();
3612         if (builtin->IsVarArgsJavaScript()) {
3613           builtin_definitions << ", kDontAdaptArgumentsSentinel";
3614         } else {
3615           DCHECK(builtin->IsFixedArgsJavaScript());
3616           // FixedArg javascript builtins need to offer the parameter
3617           // count.
          int parameter_count =
              static_cast<int>(builtin->signature().ExplicitCount());
          builtin_definitions << ", JSParameterCount(" << parameter_count
                              << ")";
          // And the receiver is explicitly declared.
          builtin_definitions << ", kReceiver";
          for (size_t i = builtin->signature().implicit_count;
               i < builtin->parameter_names().size(); ++i) {
            Identifier* parameter = builtin->parameter_names()[i];
            builtin_definitions << ", k" << CamelifyString(parameter->value);
          }
        }
      }
      builtin_definitions << ") \\\n";
    }
    builtin_definitions << "\n";

    builtin_definitions
        << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
    for (const BuiltinPointerType* type :
         TypeOracle::AllBuiltinPointerTypes()) {
      Builtin* example_builtin =
          Declarations::FindSomeInternalBuiltinWithType(type);
      if (!example_builtin) {
        CurrentSourcePosition::Scope current_source_position(
            SourcePosition{CurrentSourceFile::Get(), LineAndColumn::Invalid(),
                           LineAndColumn::Invalid()});
        ReportError("unable to find any builtin with type \"", *type, "\"");
      }
      builtin_definitions << "  V(" << type->function_pointer_type_id() << ","
                          << example_builtin->ExternalName() << ")\\\n";
    }
    builtin_definitions << "\n";
  }
  WriteFile(output_directory + "/" + builtin_definitions_file_name,
            builtin_definitions.str());
  WriteFile(output_directory + "/" + interface_descriptors_file_name,
            interface_descriptors.str());
}
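
// For illustration: given a hypothetical Torque stub builtin "FooBar" that
// takes a context plus one tagged parameter "receiver" and returns one
// tagged value, the loop above emits roughly
//   TFC(FooBar, FooBar) \
// into the builtin list, and a descriptor along the lines of
//   class FooBarDescriptor
//       : public StaticCallInterfaceDescriptor<FooBarDescriptor> {
//    public:
//     DEFINE_RESULT_AND_PARAMETERS(1, kReceiver)
//     DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(),
//                                       MachineType::AnyTagged())
//     DECLARE_DEFAULT_DESCRIPTOR(FooBarDescriptor)
//   };
// "FooBar" is a made-up name; the exact machine types depend on the
// builtin's signature.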

namespace {

enum class FieldSectionType : uint32_t {
  kNoSection = 0,
  kWeakSection = 1 << 0,
  kStrongSection = 2 << 0,
  kScalarSection = 3 << 0
};

bool IsPointerSection(FieldSectionType type) {
  return type == FieldSectionType::kWeakSection ||
         type == FieldSectionType::kStrongSection;
}

using FieldSections = base::Flags<FieldSectionType>;

std::string ToString(FieldSectionType type) {
  switch (type) {
    case FieldSectionType::kNoSection:
      return "NoSection";
    case FieldSectionType::kWeakSection:
      return "WeakFields";
    case FieldSectionType::kStrongSection:
      return "StrongFields";
    case FieldSectionType::kScalarSection:
      return "ScalarFields";
  }
  UNREACHABLE();
}

class FieldOffsetsGenerator {
 public:
  explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}

  virtual void WriteField(const Field& f, const std::string& size_string) = 0;
  virtual void WriteFieldOffsetGetter(const Field& f) = 0;
  virtual void WriteMarker(const std::string& marker) = 0;

  virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }

  void RecordOffsetFor(const Field& f) {
    CHECK(!is_finished_);
    UpdateSection(f);

    // Emit kHeaderSize before any indexed field.
    if (f.index.has_value() && !header_size_emitted_) {
      WriteMarker("kHeaderSize");
      header_size_emitted_ = true;
    }

    // We don't know statically how much space an indexed field takes, so
    // report it as zero.
    std::string size_string = "0";
    if (!f.index.has_value()) {
      size_t field_size;
      std::tie(field_size, size_string) = f.GetFieldSizeInformation();
    }
    if (f.offset.has_value()) {
      WriteField(f, size_string);
    } else {
      WriteFieldOffsetGetter(f);
    }
  }

  void Finish() {
    End(current_section_);
    if (!(completed_sections_ & FieldSectionType::kWeakSection)) {
      Begin(FieldSectionType::kWeakSection);
      End(FieldSectionType::kWeakSection);
    }
    if (!(completed_sections_ & FieldSectionType::kStrongSection)) {
      Begin(FieldSectionType::kStrongSection);
      End(FieldSectionType::kStrongSection);
    }
    is_finished_ = true;

    // In the presence of indexed fields, we already emitted kHeaderSize before
    // the indexed field.
    if (!type_->IsShape() && !header_size_emitted_) {
      WriteMarker("kHeaderSize");
    }
    if (!type_->IsAbstract() && type_->HasStaticSize()) {
      WriteMarker("kSize");
    }
  }

 protected:
  const ClassType* type_;

 private:
  FieldSectionType GetSectionFor(const Field& f) {
    const Type* field_type = f.name_and_type.type;
    if (field_type == TypeOracle::GetVoidType()) {
      // Allow void type for marker constants of size zero.
      return current_section_;
    }
    StructType::Classification struct_contents =
        StructType::ClassificationFlag::kEmpty;
    if (auto field_as_struct = field_type->StructSupertype()) {
      struct_contents = (*field_as_struct)->ClassifyContents();
    }
    if ((struct_contents & StructType::ClassificationFlag::kStrongTagged) &&
        (struct_contents & StructType::ClassificationFlag::kWeakTagged)) {
      // It's okay for a struct to contain both strong and weak data. We'll just
      // treat the whole thing as weak. This is required for DescriptorEntry.
      struct_contents &= ~StructType::Classification(
          StructType::ClassificationFlag::kStrongTagged);
    }
    bool struct_contains_tagged_fields =
        (struct_contents & StructType::ClassificationFlag::kStrongTagged) ||
        (struct_contents & StructType::ClassificationFlag::kWeakTagged);
    if (struct_contains_tagged_fields &&
        (struct_contents & StructType::ClassificationFlag::kUntagged)) {
      // We can't declare what section a struct goes in if it has multiple
      // categories of data within.
      Error(
          "Classes do not support fields which are structs containing both "
          "tagged and untagged data.")
          .Position(f.pos);
    }
    if ((field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType()) ||
         struct_contents == StructType::ClassificationFlag::kStrongTagged) &&
        !f.custom_weak_marking) {
      return FieldSectionType::kStrongSection;
    } else if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
               struct_contains_tagged_fields) {
      return FieldSectionType::kWeakSection;
    } else {
      return FieldSectionType::kScalarSection;
    }
  }
  void UpdateSection(const Field& f) {
    FieldSectionType type = GetSectionFor(f);
    if (current_section_ == type) return;
    if (IsPointerSection(type)) {
      if (completed_sections_ & type) {
        std::stringstream s;
        s << "cannot declare field " << f.name_and_type.name << " in class "
          << type_->name() << ", because section " << ToString(type)
          << " to which it belongs has already been finished.";
        Error(s.str()).Position(f.pos);
      }
    }
    End(current_section_);
    current_section_ = type;
    Begin(current_section_);
  }
  void Begin(FieldSectionType type) {
    DCHECK(type != FieldSectionType::kNoSection);
    if (!IsPointerSection(type)) return;
    WriteMarker("kStartOf" + ToString(type) + "Offset");
  }
  void End(FieldSectionType type) {
    if (!IsPointerSection(type)) return;
    completed_sections_ |= type;
    WriteMarker("kEndOf" + ToString(type) + "Offset");
  }

  FieldSectionType current_section_ = FieldSectionType::kNoSection;
  FieldSections completed_sections_ = FieldSectionType::kNoSection;
  bool is_finished_ = false;
  bool header_size_emitted_ = false;
};
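
// A rough sketch of the marker sequence a subclass emits for a hypothetical
// class with one strong pointer field followed by one scalar field (names
// are illustrative only):
//   kStartOfStrongFieldsOffset, <strong field>, kEndOfStrongFieldsOffset,
//   kStartOfWeakFieldsOffset, kEndOfWeakFieldsOffset,  // empty weak section
//   <scalar field>, kHeaderSize, kSize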

void GenerateClassExport(const ClassType* type, std::ostream& header,
                         std::ostream& inl_header) {
  const ClassType* super = type->GetSuperClass();
  std::string parent = "TorqueGenerated" + type->name() + "<" + type->name() +
                       ", " + super->name() + ">";
  header << "class " << type->name() << " : public " << parent << " {\n";
  header << " public:\n";
  if (type->ShouldGenerateBodyDescriptor()) {
    header << "  class BodyDescriptor;\n";
  }
  header << "  TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
  header << "};\n\n";
  inl_header << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
}
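
// For a hypothetical exported class Foo extending HeapObject, this prints
//   class Foo : public TorqueGeneratedFoo<Foo, HeapObject> {
//    public:
//     TQ_OBJECT_CONSTRUCTORS(Foo)
//   };
// (plus the BodyDescriptor line when applicable) and
// TQ_OBJECT_CONSTRUCTORS_IMPL(Foo) in the -inl.h file. Foo is a made-up
// example, not a real class.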

}  // namespace

void ImplementationVisitor::GenerateVisitorLists(
    const std::string& output_directory) {
  std::stringstream header;
  std::string file_name = "visitor-lists.h";
  {
    IncludeGuardScope include_guard(header, file_name);

    header << "#define TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && type->OwnInstanceType()) {
        std::string type_name =
            CapifyStringWithUnderscores(type->name()) + "_TYPE";
        header << "V(" << type_name << "," << type->name() << ")\\\n";
      }
    }
    header << "\n";

    header << "#define TORQUE_DATA_ONLY_VISITOR_ID_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && type->HasNoPointerSlots()) {
        header << "V(" << type->name() << ")\\\n";
      }
    }
    header << "\n";

    header << "#define TORQUE_POINTER_VISITOR_ID_LIST(V)\\\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (type->ShouldGenerateBodyDescriptor() && !type->HasNoPointerSlots()) {
        header << "V(" << type->name() << ")\\\n";
      }
    }
    header << "\n";
  }
  const std::string output_header_path = output_directory + "/" + file_name;
  WriteFile(output_header_path, header.str());
}

void ImplementationVisitor::GenerateBitFields(
    const std::string& output_directory) {
  std::stringstream header;
  std::string file_name = "bit-fields.h";
  {
    IncludeGuardScope include_guard(header, file_name);
    header << "#include \"src/base/bit-field.h\"\n\n";
    NamespaceScope namespaces(header, {"v8", "internal"});

    for (const auto& type : TypeOracle::GetBitFieldStructTypes()) {
      bool all_single_bits = true;  // Track whether every field is one bit.

      header << "#define DEFINE_TORQUE_GENERATED_"
             << CapifyStringWithUnderscores(type->name()) << "() \\\n";
      std::string type_name = type->GetConstexprGeneratedTypeName();
      for (const auto& field : type->fields()) {
        const char* suffix = field.num_bits == 1 ? "Bit" : "Bits";
        all_single_bits = all_single_bits && field.num_bits == 1;
        std::string field_type_name =
            field.name_and_type.type->GetConstexprGeneratedTypeName();
        header << "  using " << CamelifyString(field.name_and_type.name)
               << suffix << " = base::BitField<" << field_type_name << ", "
               << field.offset << ", " << field.num_bits << ", " << type_name
               << ">; \\\n";
      }

      // If every field is one bit, we can also generate a convenient enum.
      if (all_single_bits) {
        header << "  enum Flag: " << type_name << " { \\\n";
        header << "    kNone = 0, \\\n";
        for (const auto& field : type->fields()) {
          header << "    k" << CamelifyString(field.name_and_type.name) << " = "
                 << type_name << "{1} << " << field.offset << ", \\\n";
        }
        header << "  }; \\\n";
        header << "  using Flags = base::Flags<Flag>; \\\n";
        header << "  static constexpr int kFlagCount = "
               << type->fields().size() << "; \\\n";
      }

      header << "\n";
    }
  }
  const std::string output_header_path = output_directory + "/" + file_name;
  WriteFile(output_header_path, header.str());
}
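
// Sketch of the macro this generates for a hypothetical bitfield struct
// FooFlags with two single-bit bool fields "a" and "b" over uint32_t:
//   #define DEFINE_TORQUE_GENERATED_FOO_FLAGS() \
//     using ABit = base::BitField<bool, 0, 1, uint32_t>; \
//     using BBit = base::BitField<bool, 1, 1, uint32_t>; \
//     enum Flag: uint32_t { \
//       kNone = 0, \
//       kA = uint32_t{1} << 0, \
//       kB = uint32_t{1} << 1, \
//     }; \
//     using Flags = base::Flags<Flag>; \
//     static constexpr int kFlagCount = 2; \
// FooFlags and its fields are invented for illustration.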

namespace {

class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
 public:
  ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
                            const ClassType* type, std::string gen_name,
                            const ClassType* parent)
      : FieldOffsetsGenerator(type),
        hdr_(header),
        inl_(inline_header),
        previous_field_end_((parent && parent->IsShape()) ? "P::kSize"
                                                          : "P::kHeaderSize"),
        gen_name_(gen_name) {}

  void WriteField(const Field& f, const std::string& size_string) override {
    hdr_ << "  // " << f.pos << "\n";
    std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
    std::string field_end = field + "End";
    hdr_ << "  static constexpr int " << field << " = " << previous_field_end_
         << ";\n";
    hdr_ << "  static constexpr int " << field_end << " = " << field << " + "
         << size_string << " - 1;\n";
    previous_field_end_ = field_end + " + 1";
  }
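
  // For example (using an illustrative tagged field "bar"), after the
  // source-position comment each call emits constants that chain off the
  // previous field's end:
  //   static constexpr int kBarOffset = kFooOffsetEnd + 1;
  //   static constexpr int kBarOffsetEnd = kBarOffset + kTaggedSize - 1;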

  void WriteFieldOffsetGetter(const Field& f) override {
    // A static constexpr int is more convenient than a getter if the offset is
    // known.
    DCHECK(!f.offset.has_value());

    std::string function_name = CamelifyString(f.name_and_type.name) + "Offset";

    std::vector<cpp::TemplateParameter> params = {cpp::TemplateParameter("D"),
                                                  cpp::TemplateParameter("P")};
    cpp::Class owner(std::move(params), gen_name_);

    auto getter = cpp::Function::DefaultGetter("int", &owner, function_name);
    getter.PrintDeclaration(hdr_);
    getter.PrintDefinition(inl_, [&](std::ostream& stream) {
      // Item 1 in a flattened slice is the offset.
      stream << "  return static_cast<int>(std::get<1>("
             << Callable::PrefixNameForCCOutput(type_->GetSliceMacroName(f))
             << "(*static_cast<const D*>(this))));\n";
    });
  }
  void WriteMarker(const std::string& marker) override {
    hdr_ << "  static constexpr int " << marker << " = " << previous_field_end_
         << ";\n";
  }

 private:
  std::ostream& hdr_;
  std::ostream& inl_;
  std::string previous_field_end_;
  std::string gen_name_;
};

class CppClassGenerator {
 public:
  CppClassGenerator(const ClassType* type, std::ostream& header,
                    std::ostream& inl_header, std::ostream& impl)
      : type_(type),
        super_(type->GetSuperClass()),
        name_(type->name()),
        gen_name_("TorqueGenerated" + name_),
        gen_name_T_(gen_name_ + "<D, P>"),
        gen_name_I_(gen_name_ + "<" + name_ + ", " + super_->name() + ">"),
        hdr_(header),
        inl_(inl_header),
        impl_(impl) {}
  const std::string template_decl() const {
    return "template <class D, class P>";
  }

  void GenerateClass();
  void GenerateCppObjectDefinitionAsserts();

 private:
  SourcePosition Position();

  void GenerateClassConstructors();

  // Generates getter and setter runtime member functions for the given class
  // field. Traverses depth-first through any nested struct fields to generate
  // accessors for them also; struct_fields represents the stack of currently
  // active struct fields.
  void GenerateFieldAccessors(const Field& class_field,
                              std::vector<const Field*>& struct_fields);
  void EmitLoadFieldStatement(std::ostream& stream, const Field& class_field,
                              std::vector<const Field*>& struct_fields);
  void EmitStoreFieldStatement(std::ostream& stream, const Field& class_field,
                               std::vector<const Field*>& struct_fields);

  void GenerateClassCasts();

  std::string GetFieldOffsetForAccessor(const Field& f);

  // Gets the C++ type name that should be used in accessors for referring to
  // the value of a class field.
  std::string GetTypeNameForAccessor(const Field& f);

  bool CanContainHeapObjects(const Type* t);

  const ClassType* type_;
  const ClassType* super_;
  const std::string name_;
  const std::string gen_name_;
  const std::string gen_name_T_;
  const std::string gen_name_I_;
  std::ostream& hdr_;
  std::ostream& inl_;
  std::ostream& impl_;
};

base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
    const ClassType& type) {
  std::vector<Field> result;
  std::set<std::string> index_names;
  for (const Field& field : type.ComputeAllFields()) {
    if (field.index) {
      auto name_and_type = ExtractSimpleFieldArraySize(type, field.index->expr);
      if (!name_and_type) {
        return base::nullopt;
      }
      index_names.insert(name_and_type->name);
    }
  }

  for (const Field& field : type.ComputeAllFields()) {
    if (index_names.count(field.name_and_type.name) != 0) {
      result.push_back(field);
    }
  }

  return result;
}

void CppClassGenerator::GenerateClass() {
  // Is<name>_NonInline(HeapObject)
  if (!type_->IsShape()) {
    cpp::Function f("Is"s + name_ + "_NonInline");
    f.SetDescription("Alias for HeapObject::Is"s + name_ +
                     "() that avoids inlining.");
    f.SetExport(true);
    f.SetReturnType("bool");
    f.AddParameter("HeapObject", "o");

    f.PrintDeclaration(hdr_);
    hdr_ << "\n";
    f.PrintDefinition(impl_, [&](std::ostream& stream) {
      stream << "  return o.Is" << name_ << "();\n";
    });
  }
  hdr_ << "// Definition " << Position() << "\n";
  hdr_ << template_decl() << "\n";
  hdr_ << "class " << gen_name_ << " : public P {\n";
  hdr_ << "  static_assert(\n"
       << "      std::is_same<" << name_ << ", D>::value,\n"
       << "      \"Use this class as direct base for " << name_ << ".\");\n";
  hdr_ << "  static_assert(\n"
       << "      std::is_same<" << super_->name() << ", P>::value,\n"
       << "      \"Pass in " << super_->name()
       << " as second template parameter for " << gen_name_ << ".\");\n\n";
  hdr_ << " public: \n";
  hdr_ << "  using Super = P;\n";
  hdr_ << "  using TorqueGeneratedClass = " << gen_name_ << "<D,P>;\n\n";
  if (!type_->ShouldExport() && !type_->IsExtern()) {
    hdr_ << " protected: // not extern or @export\n";
  }
  for (const Field& f : type_->fields()) {
    CurrentSourcePosition::Scope scope(f.pos);
    std::vector<const Field*> struct_fields;
    GenerateFieldAccessors(f, struct_fields);
  }
  if (!type_->ShouldExport() && !type_->IsExtern()) {
    hdr_ << " public:\n";
  }

  GenerateClassCasts();

  std::vector<cpp::TemplateParameter> templateArgs = {
      cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
  cpp::Class c(std::move(templateArgs), gen_name_);

  if (type_->ShouldGeneratePrint()) {
    hdr_ << "  DECL_PRINTER(" << name_ << ")\n\n";
  }

  if (type_->ShouldGenerateVerify()) {
    IfDefScope hdr_scope(hdr_, "VERIFY_HEAP");
    // V8_EXPORT_PRIVATE void Verify(Isolate*);
    cpp::Function f(&c, name_ + "Verify");
    f.SetExport();
    f.SetReturnType("void");
    f.AddParameter("Isolate*", "isolate");
    f.PrintDeclaration(hdr_);

    IfDefScope impl_scope(impl_, "VERIFY_HEAP");
    impl_ << "\ntemplate <>\n";
    impl_ << "void " << gen_name_I_ << "::" << name_
          << "Verify(Isolate* isolate) {\n";
    impl_ << "  TorqueGeneratedClassVerifiers::" << name_ << "Verify(" << name_
          << "::cast(*this), "
             "isolate);\n";
    impl_ << "}\n\n";
    impl_ << "\n";
  }

  hdr_ << "\n";
  ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
                              type_->GetSuperClass());
  for (auto f : type_->fields()) {
    CurrentSourcePosition::Scope scope(f.pos);
    g.RecordOffsetFor(f);
  }
  g.Finish();
  hdr_ << "\n";

  auto index_fields = GetOrderedUniqueIndexFields(*type_);

  if (!index_fields.has_value()) {
    hdr_ << "  // SizeFor implementations not generated due to complex array "
            "lengths\n\n";

    const Field& last_field = type_->LastField();
    std::string last_field_item_size =
        std::get<1>(*SizeOf(last_field.name_and_type.type));

    // int AllocatedSize() const
    {
      cpp::Function f =
          cpp::Function::DefaultGetter("int", &c, "AllocatedSize");
      f.PrintDeclaration(hdr_);

      f.PrintDefinition(inl_, [&](std::ostream& stream) {
        stream << "  auto slice = "
               << Callable::PrefixNameForCCOutput(
                      type_->GetSliceMacroName(last_field))
               << "(*static_cast<const D*>(this));\n";
        stream << "  return static_cast<int>(std::get<1>(slice)) + "
               << last_field_item_size
               << " * static_cast<int>(std::get<2>(slice));\n";
      });
    }
  } else if (type_->ShouldGenerateBodyDescriptor() ||
             (!type_->IsAbstract() &&
              !type_->IsSubtypeOf(TypeOracle::GetJSObjectType()))) {
    cpp::Function f(&c, "SizeFor");
    f.SetReturnType("int32_t");
    f.SetFlags(cpp::Function::kStatic | cpp::Function::kConstexpr |
               cpp::Function::kV8Inline);
    for (const Field& field : *index_fields) {
      f.AddParameter("int", field.name_and_type.name);
    }
    f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
      if (index_fields->empty()) {
        stream << "    DCHECK(kHeaderSize == kSize && kHeaderSize == "
               << *type_->size().SingleValue() << ");\n";
      }
      stream << "    int32_t size = kHeaderSize;\n";
      for (const Field& field : type_->ComputeAllFields()) {
        if (field.index) {
          auto index_name_and_type =
              *ExtractSimpleFieldArraySize(*type_, field.index->expr);
          stream << "    size += " << index_name_and_type.name << " * "
                 << std::get<0>(field.GetFieldSizeInformation()) << ";\n";
        }
      }
      if (type_->size().Alignment() < TargetArchitecture::TaggedSize()) {
        stream << "    size = OBJECT_POINTER_ALIGN(size);\n";
      }
      stream << "    return size;\n";
    });
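
    // Illustrative output for a hypothetical class with one indexed field
    // "entries" holding tagged values:
    //   V8_INLINE static constexpr int32_t SizeFor(int entries) {
    //     int32_t size = kHeaderSize;
    //     size += entries * 8;  // the element size is emitted as a literal
    //     return size;
    //   }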

    // V8_INLINE int32_t AllocatedSize() const
    {
      cpp::Function allocated_size_f =
          cpp::Function::DefaultGetter("int32_t", &c, "AllocatedSize");
      allocated_size_f.SetFlag(cpp::Function::kV8Inline);
      allocated_size_f.PrintInlineDefinition(hdr_, [&](std::ostream& stream) {
        stream << "    return SizeFor(";
        bool first = true;
        for (auto field : *index_fields) {
          if (!first) stream << ", ";
          stream << "this->" << field.name_and_type.name << "()";
          first = false;
        }
        stream << ");\n";
      });
    }
  }

  hdr_ << "  friend class Factory;\n\n";

  GenerateClassConstructors();

  hdr_ << "};\n\n";

  if (type_->ShouldGenerateFullClassDefinition()) {
    // If this class extends from another class which is defined in the same tq
    // file, and that other class doesn't generate a full class definition, then
    // the resulting .inc file would be uncompilable due to ordering
    // requirements: the generated file must go before the hand-written
    // definition of the base class, but it must also go after that same
    // hand-written definition.
    base::Optional<const ClassType*> parent = type_->parent()->ClassSupertype();
    while (parent) {
      if ((*parent)->ShouldGenerateCppClassDefinitions() &&
          !(*parent)->ShouldGenerateFullClassDefinition() &&
          (*parent)->AttributedToFile() == type_->AttributedToFile()) {
        Error("Exported ", *type_,
              " cannot be in the same file as its parent extern ", **parent);
      }
      parent = (*parent)->parent()->ClassSupertype();
    }

    GenerateClassExport(type_, hdr_, inl_);
  }
}

void CppClassGenerator::GenerateCppObjectDefinitionAsserts() {
  hdr_ << "// Definition " << Position() << "\n"
       << template_decl() << "\n"
       << "class " << gen_name_ << "Asserts {\n";

  ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
                              type_->GetSuperClass());
  for (auto f : type_->fields()) {
    CurrentSourcePosition::Scope scope(f.pos);
    g.RecordOffsetFor(f);
  }
  g.Finish();
  hdr_ << "\n";

  for (auto f : type_->fields()) {
    std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
    std::string type = f.name_and_type.type->SimpleName();
    hdr_ << "  static_assert(" << field << " == D::" << field << ",\n"
         << "                \"Values of " << name_ << "::" << field
         << " defined in Torque and C++ do not match\");\n"
         << "  static_assert(StaticStringsEqual(\"" << type << "\", D::k"
         << CamelifyString(f.name_and_type.name) << "TqFieldType),\n"
         << "                \"Types of " << name_ << "::" << field
         << " specified in Torque and C++ do not match\");\n";
  }
  hdr_ << "  static_assert(kSize == D::kSize);\n";

  hdr_ << "};\n\n";
}

void CppClassGenerator::GenerateClassCasts() {
  cpp::Class owner({cpp::TemplateParameter("D"), cpp::TemplateParameter("P")},
                   gen_name_);
  cpp::Function f(&owner, "cast");
  f.SetFlags(cpp::Function::kV8Inline | cpp::Function::kStatic);
  f.SetReturnType("D");
  f.AddParameter("Object", "object");

  // V8_INLINE static D cast(Object)
  f.PrintDeclaration(hdr_);
  f.PrintDefinition(inl_, [](std::ostream& stream) {
    stream << "    return D(object.ptr());\n";
  });
  // V8_INLINE static D unchecked_cast(Object)
  f.SetName("unchecked_cast");
  f.PrintInlineDefinition(hdr_, [](std::ostream& stream) {
    stream << "    return bit_cast<D>(object);\n";
  });
}

SourcePosition CppClassGenerator::Position() { return type_->GetPosition(); }

void CppClassGenerator::GenerateClassConstructors() {
  const ClassType* typecheck_type = type_;
  while (typecheck_type->IsShape()) {
    typecheck_type = typecheck_type->GetSuperClass();

    // Shapes have already been checked earlier to inherit from JSObject, so we
    // should have found an appropriate type.
    DCHECK(typecheck_type);
  }

  hdr_ << "  template <class DAlias = D>\n";
  hdr_ << "  constexpr " << gen_name_ << "() : P() {\n";
  hdr_ << "    static_assert(\n";
  hdr_ << "        std::is_base_of<" << gen_name_ << ", DAlias>::value,\n";
  hdr_ << "        \"class " << gen_name_
       << " should be used as direct base for " << name_ << ".\");\n";
  hdr_ << "  }\n\n";

  hdr_ << " protected:\n";
  hdr_ << "  inline explicit " << gen_name_ << "(Address ptr);\n";
  hdr_ << "  // Special-purpose constructor for subclasses that have fast "
          "paths where\n";
  hdr_ << "  // their ptr() is a Smi.\n";
  hdr_ << "  inline explicit " << gen_name_
       << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi);\n";

  inl_ << "template<class D, class P>\n";
  inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
  inl_ << "    : P(ptr) {\n";
  inl_ << "  SLOW_DCHECK(Is" << typecheck_type->name()
       << "_NonInline(*this));\n";
  inl_ << "}\n";

  inl_ << "template<class D, class P>\n";
  inl_ << "inline " << gen_name_T_ << "::" << gen_name_
       << "(Address ptr, HeapObject::AllowInlineSmiStorage allow_smi)\n";
  inl_ << "    : P(ptr, allow_smi) {\n";
  inl_ << "  SLOW_DCHECK("
       << "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
          " && this->IsSmi()) || Is"
       << typecheck_type->name() << "_NonInline(*this));\n";
  inl_ << "}\n";
}
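
// Sketch of what this emits into the -inl.h file for a hypothetical class
// Foo:
//   template<class D, class P>
//   inline TorqueGeneratedFoo<D, P>::TorqueGeneratedFoo(Address ptr)
//       : P(ptr) {
//     SLOW_DCHECK(IsFoo_NonInline(*this));
//   }
// (Foo is illustrative; for shapes, the check uses the nearest non-shape
// superclass instead.)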

namespace {
std::string GenerateRuntimeTypeCheck(const Type* type,
                                     const std::string& value) {
  bool maybe_object = !type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
  std::stringstream type_check;
  bool at_start = true;
  // If weak pointers are allowed, then start by checking for a cleared value.
  if (maybe_object) {
    type_check << value << ".IsCleared()";
    at_start = false;
  }
  for (const TypeChecker& runtime_type : type->GetTypeCheckers()) {
    if (!at_start) type_check << " || ";
    at_start = false;
    if (maybe_object) {
      bool strong = runtime_type.weak_ref_to.empty();
      if (strong && runtime_type.type == WEAK_HEAP_OBJECT) {
        // Rather than a generic Weak<T>, this is the basic type WeakHeapObject.
        // We can't validate anything more about the type of the object pointed
        // to, so just check that it's weak.
        type_check << value << ".IsWeak()";
      } else {
        type_check << "(" << (strong ? "!" : "") << value << ".IsWeak() && "
                   << value << ".GetHeapObjectOrSmi().Is"
                   << (strong ? runtime_type.type : runtime_type.weak_ref_to)
                   << "())";
      }
    } else {
      type_check << value << ".Is" << runtime_type.type << "()";
    }
  }
  return type_check.str();
}
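
// For a hypothetical field of Torque type Weak<Foo>, this returns roughly
//   value.IsCleared() || (value.IsWeak() &&
//       value.GetHeapObjectOrSmi().IsFoo())
// whereas for a strong Foo field it is simply "value.IsFoo()".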

void GenerateBoundsDCheck(std::ostream& os, const std::string& index,
                          const ClassType* type, const Field& f) {
  os << "  DCHECK_GE(" << index << ", 0);\n";
  std::string length_expression;
  if (base::Optional<NameAndType> array_length =
          ExtractSimpleFieldArraySize(*type, f.index->expr)) {
    length_expression = "this->" + array_length->name + "()";
  } else {
    // The length is element 2 in the flattened field slice.
    length_expression =
        "static_cast<int>(std::get<2>(" +
        Callable::PrefixNameForCCOutput(type->GetSliceMacroName(f)) +
        "(*static_cast<const D*>(this))))";
  }
  os << "  DCHECK_LT(" << index << ", " << length_expression << ");\n";
}
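
// E.g. for an indexed field whose length is stored in a simple field with
// a hypothetical getter "entries_count", the emitted checks look like:
//   DCHECK_GE(i, 0);
//   DCHECK_LT(i, this->entries_count());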

bool CanGenerateFieldAccessors(const Type* field_type) {
  // float64_or_hole should be treated like float64. For now, we don't need it.
  // TODO(v8:10391) Generate accessors for external pointers.
  return field_type != TypeOracle::GetVoidType() &&
         field_type != TypeOracle::GetFloat64OrHoleType() &&
         !field_type->IsSubtypeOf(TypeOracle::GetExternalPointerType());
}
}  // namespace

// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
void CppClassGenerator::GenerateFieldAccessors(
    const Field& class_field, std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  if (!CanGenerateFieldAccessors(field_type)) return;

  if (const StructType* struct_type = StructType::DynamicCast(field_type)) {
    struct_fields.resize(struct_fields.size() + 1);
    for (const Field& struct_field : struct_type->fields()) {
      struct_fields[struct_fields.size() - 1] = &struct_field;
      GenerateFieldAccessors(class_field, struct_fields);
    }
    struct_fields.resize(struct_fields.size() - 1);
    return;
  }

  bool indexed = class_field.index && !class_field.index->optional;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  bool can_contain_heap_objects = CanContainHeapObjects(field_type);

  // Assemble an accessor name by concatenating all of the nested field
  // names.
  std::string name = class_field.name_and_type.name;
  for (const Field* nested_struct_field : struct_fields) {
    name += "_" + nested_struct_field->name_and_type.name;
  }

  // Generate declarations in header.
  if (can_contain_heap_objects && !field_type->IsClassType() &&
      !field_type->IsStructType() &&
      field_type != TypeOracle::GetObjectType()) {
    hdr_ << "  // Torque type: " << field_type->ToString() << "\n";
  }

  std::vector<cpp::TemplateParameter> templateParameters = {
      cpp::TemplateParameter("D"), cpp::TemplateParameter("P")};
  cpp::Class owner(std::move(templateParameters), gen_name_);

  // getter
  {
    auto getter = cpp::Function::DefaultGetter(type_name, &owner, name);
    if (indexed) {
      getter.AddParameter("int", "i");
    }
    const char* tag_argument;
    switch (class_field.read_synchronization) {
      case FieldSynchronization::kNone:
        tag_argument = "";
        break;
      case FieldSynchronization::kRelaxed:
        getter.AddParameter("RelaxedLoadTag");
        tag_argument = ", kRelaxedLoad";
        break;
      case FieldSynchronization::kAcquireRelease:
        getter.AddParameter("AcquireLoadTag");
        tag_argument = ", kAcquireLoad";
        break;
    }

    getter.PrintDeclaration(hdr_);

    // For tagged data, generate the extra getter that derives a
    // PtrComprCageBase from the current object's pointer.
    if (can_contain_heap_objects) {
      getter.PrintDefinition(inl_, [&](auto& stream) {
        stream
            << "  PtrComprCageBase cage_base = GetPtrComprCageBase(*this);\n";
        stream << "  return " << gen_name_ << "::" << name << "(cage_base"
               << (indexed ? ", i" : "") << tag_argument << ");\n";
      });

      getter.InsertParameter(0, "PtrComprCageBase", "cage_base");
      getter.PrintDeclaration(hdr_);
    }

    getter.PrintDefinition(inl_, [&](auto& stream) {
      stream << "  " << type_name << " value;\n";
      EmitLoadFieldStatement(stream, class_field, struct_fields);
      stream << "  return value;\n";
    });
  }

  // setter
  {
    auto setter = cpp::Function::DefaultSetter(
        &owner, std::string("set_") + name, type_name, "value");
    if (indexed) {
      setter.InsertParameter(0, "int", "i");
    }
    switch (class_field.write_synchronization) {
      case FieldSynchronization::kNone:
        break;
      case FieldSynchronization::kRelaxed:
        setter.AddParameter("RelaxedStoreTag");
        break;
      case FieldSynchronization::kAcquireRelease:
        setter.AddParameter("ReleaseStoreTag");
        break;
    }
    if (can_contain_heap_objects) {
      setter.AddParameter("WriteBarrierMode", "mode", "UPDATE_WRITE_BARRIER");
    }
    setter.PrintDeclaration(hdr_);

    setter.PrintDefinition(inl_, [&](auto& stream) {
      EmitStoreFieldStatement(stream, class_field, struct_fields);
    });
  }

  hdr_ << "\n";
}
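
// The declarations this produces for a hypothetical tagged field "bar" of
// type Foo look roughly like:
//   inline Foo bar() const;
//   inline Foo bar(PtrComprCageBase cage_base) const;
//   inline void set_bar(Foo value,
//                       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// ("bar" and Foo are made-up names for illustration.)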

std::string CppClassGenerator::GetFieldOffsetForAccessor(const Field& f) {
  if (f.offset.has_value()) {
    return "k" + CamelifyString(f.name_and_type.name) + "Offset";
  }
  return CamelifyString(f.name_and_type.name) + "Offset()";
}

std::string CppClassGenerator::GetTypeNameForAccessor(const Field& f) {
  const Type* field_type = f.name_and_type.type;
  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    const Type* constexpr_version = field_type->ConstexprVersion();
    if (!constexpr_version) {
      Error("Field accessor for ", type_->name(), "::", f.name_and_type.name,
            " cannot be generated because its type ", *field_type,
            " is neither a subclass of Object nor has a constexpr version.")
          .Position(f.pos)
          .Throw();
    }
    return constexpr_version->GetGeneratedTypeName();
  }
  if (field_type->IsSubtypeOf(TypeOracle::GetSmiType())) {
    // Follow the convention to create Smi accessors with type int.
    return "int";
  }
  return field_type->UnhandlifiedCppTypeName();
}

bool CppClassGenerator::CanContainHeapObjects(const Type* t) {
  return t->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
         !t->IsSubtypeOf(TypeOracle::GetSmiType());
}

void CppClassGenerator::EmitLoadFieldStatement(
    std::ostream& stream, const Field& class_field,
    std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  const std::string class_field_size =
      std::get<1>(class_field.GetFieldSizeInformation());

  // field_offset contains both the offset from the beginning of the object to
  // the class field and the combined offsets of any nested struct fields
  // within, but not the index adjustment.
  std::string field_offset = GetFieldOffsetForAccessor(class_field);
  for (const Field* nested_struct_field : struct_fields) {
    field_offset += " + " + std::to_string(*nested_struct_field->offset);
  }

  std::string offset = field_offset;
  if (class_field.index) {
    const char* index = class_field.index->optional ? "0" : "i";
    GenerateBoundsDCheck(stream, index, type_, class_field);
    stream << "  int offset = " << field_offset << " + " << index << " * "
           << class_field_size << ";\n";
    offset = "offset";
  }

  stream << "  value = ";

  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    if (class_field.read_synchronization ==
        FieldSynchronization::kAcquireRelease) {
      ReportError("Torque doesn't support @cppAcquireRead on untagged data");
    } else if (class_field.read_synchronization ==
               FieldSynchronization::kRelaxed) {
      ReportError("Torque doesn't support @cppRelaxedRead on untagged data");
    }
    stream << "this->template ReadField<" << type_name << ">(" << offset
           << ");\n";
  } else {
    const char* load;
    switch (class_field.read_synchronization) {
      case FieldSynchronization::kNone:
        load = "load";
        break;
      case FieldSynchronization::kRelaxed:
        load = "Relaxed_Load";
        break;
      case FieldSynchronization::kAcquireRelease:
        load = "Acquire_Load";
        break;
    }
    bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
    const std::string load_type = is_smi ? "Smi" : type_name;
    const char* postfix = is_smi ? ".value()" : "";
    const char* optional_cage_base = is_smi ? "" : "cage_base, ";

    stream << "TaggedField<" << load_type << ">::" << load << "("
           << optional_cage_base << "*this, " << offset << ")" << postfix
           << ";\n";
  }

  if (CanContainHeapObjects(field_type)) {
    stream << "  DCHECK(" << GenerateRuntimeTypeCheck(field_type, "value")
           << ");\n";
  }
}
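
// For a hypothetical tagged field "bar" of type Foo, this emits roughly:
//   value = TaggedField<Foo>::load(cage_base, *this, kBarOffset);
//   DCHECK(value.IsFoo());
// A Smi field instead loads through TaggedField<Smi> without the cage base
// and unwraps the int via .value().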

void CppClassGenerator::EmitStoreFieldStatement(
    std::ostream& stream, const Field& class_field,
    std::vector<const Field*>& struct_fields) {
  const Field& innermost_field =
      struct_fields.empty() ? class_field : *struct_fields.back();
  const Type* field_type = innermost_field.name_and_type.type;
  std::string type_name = GetTypeNameForAccessor(innermost_field);
  const std::string class_field_size =
      std::get<1>(class_field.GetFieldSizeInformation());

  // field_offset contains both the offset from the beginning of the object to
  // the class field and the combined offsets of any nested struct fields
  // within, but not the index adjustment.
  std::string field_offset = GetFieldOffsetForAccessor(class_field);
  for (const Field* nested_struct_field : struct_fields) {
    field_offset += " + " + std::to_string(*nested_struct_field->offset);
  }

  std::string offset = field_offset;
  if (class_field.index) {
    const char* index = class_field.index->optional ? "0" : "i";
    GenerateBoundsDCheck(stream, index, type_, class_field);
    stream << "  int offset = " << field_offset << " + " << index << " * "
           << class_field_size << ";\n";
    offset = "offset";
  }

  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
    stream << "  this->template WriteField<" << type_name << ">(" << offset
           << ", value);\n";
  } else {
    bool strong_pointer = field_type->IsSubtypeOf(TypeOracle::GetObjectType());
    bool is_smi = field_type->IsSubtypeOf(TypeOracle::GetSmiType());
    const char* write_macro;
    if (!strong_pointer) {
      if (class_field.write_synchronization ==
          FieldSynchronization::kAcquireRelease) {
        ReportError("Torque doesn't support @releaseWrite on weak fields");
      }
      write_macro = "RELAXED_WRITE_WEAK_FIELD";
    } else {
      switch (class_field.write_synchronization) {
        case FieldSynchronization::kNone:
          write_macro = "WRITE_FIELD";
          break;
        case FieldSynchronization::kRelaxed:
          write_macro = "RELAXED_WRITE_FIELD";
          break;
        case FieldSynchronization::kAcquireRelease:
          write_macro = "RELEASE_WRITE_FIELD";
          break;
      }
    }
    const std::string value_to_write = is_smi ? "Smi::FromInt(value)" : "value";

    if (!is_smi) {
      stream << "  SLOW_DCHECK("
             << GenerateRuntimeTypeCheck(field_type, "value") << ");\n";
    }
    stream << "  " << write_macro << "(*this, " << offset << ", "
           << value_to_write << ");\n";
    if (!is_smi) {
      const char* write_barrier = strong_pointer
                                      ? "CONDITIONAL_WRITE_BARRIER"
                                      : "CONDITIONAL_WEAK_WRITE_BARRIER";
      stream << "  " << write_barrier << "(*this, " << offset
             << ", value, mode);\n";
    }
  }
}
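
// For the same hypothetical strong field "bar" of type Foo, the emitted
// setter body looks like:
//   SLOW_DCHECK(value.IsFoo());
//   WRITE_FIELD(*this, kBarOffset, value);
//   CONDITIONAL_WRITE_BARRIER(*this, kBarOffset, value, mode);
// Smi fields skip the check and barrier and store Smi::FromInt(value).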

void GenerateStructLayoutDescription(std::ostream& header,
                                     const StructType* type) {
  header << "struct TorqueGenerated" << CamelifyString(type->name())
         << "Offsets {\n";
  for (const Field& field : type->fields()) {
    header << "  static constexpr int k"
           << CamelifyString(field.name_and_type.name)
           << "Offset = " << *field.offset << ";\n";
  }
  header << "  static constexpr int kSize = " << type->PackedSize() << ";\n";
  header << "};\n\n";
}
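
// E.g. a hypothetical struct MyStruct with two tagged fields "target" and
// "details" would yield, on a platform with 8-byte tagged slots:
//   struct TorqueGeneratedMyStructOffsets {
//     static constexpr int kTargetOffset = 0;
//     static constexpr int kDetailsOffset = 8;
//     static constexpr int kSize = 16;
//   };
// (MyStruct and its fields are illustrative; actual offsets come from the
// struct's packed layout.)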

}  // namespace

void ImplementationVisitor::GenerateClassDefinitions(
    const std::string& output_directory) {
  std::stringstream factory_header;
  std::stringstream factory_impl;
  std::string factory_basename = "factory";

  std::stringstream forward_declarations;
  std::string forward_declarations_filename = "class-forward-declarations.h";

  {
    factory_impl << "#include \"src/heap/factory-base.h\"\n";
    factory_impl << "#include \"src/heap/factory-base-inl.h\"\n";
    factory_impl << "#include \"src/heap/heap.h\"\n";
    factory_impl << "#include \"src/heap/heap-inl.h\"\n";
    factory_impl << "#include \"src/execution/isolate.h\"\n";
    factory_impl << "#include "
                    "\"src/objects/all-objects-inl.h\"\n\n";
    NamespaceScope factory_impl_namespaces(factory_impl, {"v8", "internal"});
    factory_impl << "\n";

    IncludeGuardScope include_guard(forward_declarations,
                                    forward_declarations_filename);
    NamespaceScope forward_declarations_namespaces(forward_declarations,
                                                   {"v8", "internal"});

    std::set<const StructType*, TypeLess> structs_used_in_classes;

    // Emit forward declarations.
    for (const ClassType* type : TypeOracle::GetClasses()) {
      CurrentSourcePosition::Scope position_activator(type->GetPosition());
      auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
      std::ostream& header = streams.class_definition_headerfile;
      std::string name = type->ShouldGenerateCppClassDefinitions()
                             ? type->name()
                             : type->GetGeneratedTNodeTypeName();
      if (type->ShouldGenerateCppClassDefinitions()) {
        header << "class " << name << ";\n";
      }
      forward_declarations << "class " << name << ";\n";
    }

    for (const ClassType* type : TypeOracle::GetClasses()) {
      CurrentSourcePosition::Scope position_activator(type->GetPosition());
      auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
      std::ostream& header = streams.class_definition_headerfile;
      std::ostream& inline_header = streams.class_definition_inline_headerfile;
      std::ostream& implementation = streams.class_definition_ccfile;

      if (type->ShouldGenerateCppClassDefinitions()) {
        CppClassGenerator g(type, header, inline_header, implementation);
        g.GenerateClass();
      } else if (type->ShouldGenerateCppObjectDefinitionAsserts()) {
        CppClassGenerator g(type, header, inline_header, implementation);
        g.GenerateCppObjectDefinitionAsserts();
      }
      for (const Field& f : type->fields()) {
        const Type* field_type = f.name_and_type.type;
        if (auto field_as_struct = field_type->StructSupertype()) {
          structs_used_in_classes.insert(*field_as_struct);
        }
      }
      if (type->ShouldGenerateFactoryFunction()) {
        std::string return_type = type->HandlifiedCppTypeName();
        std::string function_name = "New" + type->name();
        std::stringstream parameters;
        for (const Field& f : type->ComputeAllFields()) {
          if (f.name_and_type.name == "map") continue;
          if (!f.index) {
            std::string type_string =
                f.name_and_type.type->HandlifiedCppTypeName();
            parameters << type_string << " " << f.name_and_type.name << ", ";
          }
        }
        parameters << "AllocationType allocation_type";

        factory_header << return_type << " " << function_name << "("
                       << parameters.str() << ");\n";
        factory_impl << "template <typename Impl>\n";
        factory_impl << return_type
                     << " TorqueGeneratedFactory<Impl>::" << function_name
                     << "(" << parameters.str() << ") {\n";

        factory_impl << "  int size = ";
        const ClassType* super = type->GetSuperClass();
        std::string gen_name = "TorqueGenerated" + type->name();
        std::string gen_name_T =
            gen_name + "<" + type->name() + ", " + super->name() + ">";
        factory_impl << gen_name_T << "::SizeFor(";

        bool first = true;
        auto index_fields = GetOrderedUniqueIndexFields(*type);
        CHECK(index_fields.has_value());
        for (auto index_field : *index_fields) {
          if (!first) {
            factory_impl << ", ";
          }
          factory_impl << index_field.name_and_type.name;
          first = false;
        }

        factory_impl << ");\n";
        factory_impl << "  Map map = factory()->read_only_roots()."
                     << SnakeifyString(type->name()) << "_map();\n";
        factory_impl << "  HeapObject raw_object =\n";
        factory_impl << "    factory()->AllocateRawWithImmortalMap(size, "
                        "allocation_type, map);\n";
        factory_impl << "  " << type->UnhandlifiedCppTypeName()
                     << " result = " << type->UnhandlifiedCppTypeName()
                     << "::cast(raw_object);\n";
        factory_impl << "  DisallowGarbageCollection no_gc;\n";
        factory_impl << "  WriteBarrierMode write_barrier_mode =\n"
                     << "     allocation_type == AllocationType::kYoung\n"
                     << "     ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;\n"
                     << "  USE(write_barrier_mode);\n";

        for (const Field& f : type->ComputeAllFields()) {
          if (f.name_and_type.name == "map") continue;
          if (!f.index) {
            factory_impl << "  result.TorqueGeneratedClass::set_"
                         << SnakeifyString(f.name_and_type.name) << "(";
            if (f.name_and_type.type->IsSubtypeOf(
                    TypeOracle::GetTaggedType()) &&
                !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
              factory_impl << "*" << f.name_and_type.name
                           << ", write_barrier_mode";
            } else {
              factory_impl << f.name_and_type.name;
            }
            factory_impl << ");\n";
          }
        }

        factory_impl << "  return handle(result, factory()->isolate());\n";
        factory_impl << "}\n\n";

        factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
                     << return_type
                     << " TorqueGeneratedFactory<Factory>::" << function_name
                     << "(" << parameters.str() << ");\n";
        factory_impl << "template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) "
                     << return_type
                     << " TorqueGeneratedFactory<LocalFactory>::"
                     << function_name << "(" << parameters.str() << ");\n";

        factory_impl << "\n\n";
      }
    }

    for (const StructType* type : structs_used_in_classes) {
      CurrentSourcePosition::Scope position_activator(type->GetPosition());
      std::ostream& header =
          GlobalContext::GeneratedPerFile(type->GetPosition().source)
              .class_definition_headerfile;
      if (type != TypeOracle::GetFloat64OrHoleType()) {
        GenerateStructLayoutDescription(header, type);
      }
    }
  }
  WriteFile(output_directory + "/" + factory_basename + ".inc",
            factory_header.str());
  WriteFile(output_directory + "/" + factory_basename + ".cc",
            factory_impl.str());
  WriteFile(output_directory + "/" + forward_declarations_filename,
            forward_declarations.str());
}

namespace {
void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
                                      const std::string& gen_name,
                                      const std::string& gen_name_T,
                                      const std::string template_params) {
  impl << template_params << "\n";
  impl << "void " << gen_name_T << "::" << type->name()
       << "Print(std::ostream& os) {\n";
  impl << "  this->PrintHeader(os, \"" << type->name() << "\");\n";
  auto hierarchy = type->GetHierarchy();
  std::map<std::string, const AggregateType*> field_names;
  for (const AggregateType* aggregate_type : hierarchy) {
    for (const Field& f : aggregate_type->fields()) {
      if (f.name_and_type.name == "map" || f.index.has_value() ||
          !CanGenerateFieldAccessors(f.name_and_type.type)) {
        continue;
      }
      std::string getter = f.name_and_type.name;
      if (aggregate_type != type) {
        // We must call getters directly on the class that provided them,
        // because a subclass could have hidden them.
        getter = aggregate_type->name() + "::TorqueGeneratedClass::" + getter;
      }
      if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()) ||
          !f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
        impl << "  os << \"\\n - " << f.name_and_type.name << ": \" << ";
        if (f.name_and_type.type->StructSupertype()) {
          // TODO(turbofan): Print struct fields too.
          impl << "\" <struct field printing still unimplemented>\";\n";
        } else {
          impl << "this->" << getter;
          switch (f.read_synchronization) {
            case FieldSynchronization::kNone:
              impl << "();\n";
              break;
            case FieldSynchronization::kRelaxed:
              impl << "(kRelaxedLoad);\n";
              break;
            case FieldSynchronization::kAcquireRelease:
              impl << "(kAcquireLoad);\n";
              break;
          }
        }
      } else {
        impl << "  os << \"\\n - " << f.name_and_type.name << ": \" << "
             << "Brief(this->" << getter;
        switch (f.read_synchronization) {
          case FieldSynchronization::kNone:
            impl << "());\n";
            break;
          case FieldSynchronization::kRelaxed:
            impl << "(kRelaxedLoad));\n";
            break;
          case FieldSynchronization::kAcquireRelease:
            impl << "(kAcquireLoad));\n";
            break;
        }
      }
    }
  }
  impl << "  os << '\\n';\n";
  impl << "}\n\n";
}
4929 }  // namespace
4930 
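// The printer emitted by GeneratePrintDefinitionsForClass above, for a
// hypothetical class "Foo" with a Smi field "count" and a tagged field "bar",
// would look roughly like this (illustrative):
//
//   template <>
//   void TorqueGeneratedFoo<Foo, HeapObject>::FooPrint(std::ostream& os) {
//     this->PrintHeader(os, "Foo");
//     os << "\n - count: " << this->count();
//     os << "\n - bar: " << Brief(this->bar());
//     os << '\n';
//   }
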
void ImplementationVisitor::GeneratePrintDefinitions(
    const std::string& output_directory) {
  std::stringstream impl;
  std::string file_name = "objects-printer.cc";
  {
    IfDefScope object_print(impl, "OBJECT_PRINT");

    impl << "#include <iosfwd>\n\n";
    impl << "#include \"src/objects/all-objects-inl.h\"\n\n";

    NamespaceScope impl_namespaces(impl, {"v8", "internal"});

    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (!type->ShouldGeneratePrint()) continue;
      DCHECK(type->ShouldGenerateCppClassDefinitions());
      const ClassType* super = type->GetSuperClass();
      std::string gen_name = "TorqueGenerated" + type->name();
      std::string gen_name_T =
          gen_name + "<" + type->name() + ", " + super->name() + ">";
      std::string template_decl = "template <>";
      GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
                                       template_decl);
    }
  }

  std::string new_contents(impl.str());
  WriteFile(output_directory + "/" + file_name, new_contents);
}

base::Optional<std::string> MatchSimpleBodyDescriptor(const ClassType* type) {
  std::vector<ObjectSlotKind> slots = type->ComputeHeaderSlotKinds();
  if (!type->HasStaticSize()) {
    slots.push_back(*type->ComputeArraySlotKind());
  }

  // Skip the map slot.
  size_t i = 1;
  while (i < slots.size() && slots[i] == ObjectSlotKind::kNoPointer) ++i;
  if (i == slots.size()) return "DataOnlyBodyDescriptor";
  bool has_weak_pointers = false;
  size_t start_index = i;
  for (; i < slots.size(); ++i) {
    if (slots[i] == ObjectSlotKind::kStrongPointer) {
      continue;
    } else if (slots[i] == ObjectSlotKind::kMaybeObjectPointer) {
      has_weak_pointers = true;
    } else if (slots[i] == ObjectSlotKind::kNoPointer) {
      break;
    } else {
      return base::nullopt;
    }
  }
  size_t end_index = i;
  for (; i < slots.size(); ++i) {
    if (slots[i] != ObjectSlotKind::kNoPointer) return base::nullopt;
  }
  size_t start_offset = start_index * TargetArchitecture::TaggedSize();
  size_t end_offset = end_index * TargetArchitecture::TaggedSize();
  // We pick a suffix-range body descriptor even in cases where the object size
  // is fixed, to reduce the amount of code executed for object visitation.
  if (end_index == slots.size()) {
    return ToString("SuffixRange", has_weak_pointers ? "Weak" : "",
                    "BodyDescriptor<", start_offset, ">");
  }
  if (!has_weak_pointers) {
    return ToString("FixedRangeBodyDescriptor<", start_offset, ", ", end_offset,
                    ">");
  }
  return base::nullopt;
}

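// Worked example (illustrative, assuming an 8-byte tagged size): a fixed-size
// class whose slots are [map, strong, strong] yields
// "SuffixRangeBodyDescriptor<8>"; [map, strong, strong, none] yields
// "FixedRangeBodyDescriptor<8, 24>"; and any layout that interleaves pointer
// and non-pointer slots after the first pointer run returns base::nullopt, so
// the caller falls back to emitting a custom descriptor.
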
void ImplementationVisitor::GenerateBodyDescriptors(
    const std::string& output_directory) {
  std::string file_name = "objects-body-descriptors-inl.inc";
  std::stringstream h_contents;

  for (const ClassType* type : TypeOracle::GetClasses()) {
    std::string name = type->name();
    if (!type->ShouldGenerateBodyDescriptor()) continue;

    bool has_array_fields = !type->HasStaticSize();
    std::vector<ObjectSlotKind> header_slot_kinds =
        type->ComputeHeaderSlotKinds();
    base::Optional<ObjectSlotKind> array_slot_kind =
        type->ComputeArraySlotKind();
    DCHECK_EQ(has_array_fields, array_slot_kind.has_value());

    h_contents << "class " << name << "::BodyDescriptor final : public ";
    if (auto descriptor_name = MatchSimpleBodyDescriptor(type)) {
      h_contents << *descriptor_name << " {\n";
      h_contents << " public:\n";
    } else {
      h_contents << "BodyDescriptorBase {\n";
      h_contents << " public:\n";

      h_contents << "  static bool IsValidSlot(Map map, HeapObject obj, int "
                    "offset) {\n";
      if (has_array_fields) {
        h_contents << "    if (offset < kHeaderSize) {\n";
      }
      h_contents << "      bool valid_slots[] = {";
      for (ObjectSlotKind slot : header_slot_kinds) {
        h_contents << (slot != ObjectSlotKind::kNoPointer ? "1" : "0") << ",";
      }
      h_contents << "};\n"
                 << "      return valid_slots[static_cast<unsigned "
                    "int>(offset)/kTaggedSize];\n";
      if (has_array_fields) {
        h_contents << "    }\n";
        bool array_is_tagged = *array_slot_kind != ObjectSlotKind::kNoPointer;
        h_contents << "    return " << (array_is_tagged ? "true" : "false")
                   << ";\n";
      }
      h_contents << "  }\n\n";

      h_contents << "  template <typename ObjectVisitor>\n";
      h_contents
          << "  static inline void IterateBody(Map map, HeapObject obj, "
             "int object_size, ObjectVisitor* v) {\n";

      std::vector<ObjectSlotKind> slots = std::move(header_slot_kinds);
      if (has_array_fields) slots.push_back(*array_slot_kind);

      // Skip the map slot.
      slots.erase(slots.begin());
      size_t start_offset = TargetArchitecture::TaggedSize();

      size_t end_offset = start_offset;
      ObjectSlotKind section_kind;
      for (size_t i = 0; i <= slots.size(); ++i) {
        base::Optional<ObjectSlotKind> next_section_kind;
        bool finished_section = false;
        if (i == 0) {
          next_section_kind = slots[i];
        } else if (i < slots.size()) {
          if (auto combined = Combine(section_kind, slots[i])) {
            next_section_kind = *combined;
          } else {
            next_section_kind = slots[i];
            finished_section = true;
          }
        } else {
          finished_section = true;
        }
        if (finished_section) {
          bool is_array_slot = i == slots.size() && has_array_fields;
          bool multiple_slots =
              is_array_slot ||
              (end_offset - start_offset > TargetArchitecture::TaggedSize());
          base::Optional<std::string> iterate_command;
          switch (section_kind) {
            case ObjectSlotKind::kStrongPointer:
              iterate_command = "IteratePointer";
              break;
            case ObjectSlotKind::kMaybeObjectPointer:
              iterate_command = "IterateMaybeWeakPointer";
              break;
            case ObjectSlotKind::kCustomWeakPointer:
              iterate_command = "IterateCustomWeakPointer";
              break;
            case ObjectSlotKind::kNoPointer:
              break;
          }
          if (iterate_command) {
            if (multiple_slots) *iterate_command += "s";
            h_contents << "    " << *iterate_command << "(obj, "
                       << start_offset;
            if (multiple_slots) {
              h_contents << ", "
                         << (i == slots.size() ? "object_size"
                                               : std::to_string(end_offset));
            }
            h_contents << ", v);\n";
          }
          start_offset = end_offset;
        }
        if (i < slots.size()) section_kind = *next_section_kind;
        end_offset += TargetArchitecture::TaggedSize();
      }

      h_contents << "  }\n\n";
    }

    h_contents
        << "  static inline int SizeOf(Map map, HeapObject raw_object) {\n";
    if (type->size().SingleValue()) {
      h_contents << "    return " << *type->size().SingleValue() << ";\n";
    } else {
      // We use an unchecked_cast here because this is used for concurrent
      // marking, where we shouldn't re-read the map.
      h_contents << "    return " << name
                 << "::unchecked_cast(raw_object).AllocatedSize();\n";
    }
    h_contents << "  }\n\n";

    h_contents << "};\n";
  }

  WriteFile(output_directory + "/" + file_name, h_contents.str());
}

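// A generated descriptor that does not match one of the simple shapes, e.g.
// for a hypothetical fixed-size class "Foo" with slots
// [map, custom-weak, strong, none] (assuming an 8-byte tagged size), would
// look roughly like this (illustrative):
//
//   class Foo::BodyDescriptor final : public BodyDescriptorBase {
//    public:
//     static bool IsValidSlot(Map map, HeapObject obj, int offset) {
//       bool valid_slots[] = {1,1,1,0,};
//       return valid_slots[static_cast<unsigned int>(offset)/kTaggedSize];
//     }
//
//     template <typename ObjectVisitor>
//     static inline void IterateBody(Map map, HeapObject obj, int object_size,
//                                    ObjectVisitor* v) {
//       IterateCustomWeakPointer(obj, 8, v);
//       IteratePointer(obj, 16, v);
//     }
//
//     static inline int SizeOf(Map map, HeapObject raw_object) {
//       return 32;
//     }
//   };
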
namespace {

// Generate verification code for a single piece of class data, which might be
// nested within a struct or might be a single element in an indexed field (or
// both).
void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
                                std::string offset, const Field& leaf_field,
                                std::string indexed_field_size,
                                std::ostream& cc_contents, bool is_map) {
  const Type* field_type = leaf_field.name_and_type.type;

  bool maybe_object =
      !field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType());
  const char* object_type = maybe_object ? "MaybeObject" : "Object";
  const char* verify_fn =
      maybe_object ? "VerifyMaybeObjectPointer" : "VerifyPointer";
  if (indexed) {
    offset += " + i * " + indexed_field_size;
  }
  // Name the local var based on the field name for nicer CHECK output.
  const std::string value = leaf_field.name_and_type.name + "__value";

  // Read the field.
  if (is_map) {
    cc_contents << "    " << object_type << " " << value << " = o.map();\n";
  } else {
    cc_contents << "    " << object_type << " " << value << " = TaggedField<"
                << object_type << ">::load(o, " << offset << ");\n";
  }

  // Call VerifyPointer or VerifyMaybeObjectPointer on it.
  cc_contents << "    " << object_type << "::" << verify_fn << "(isolate, "
              << value << ");\n";

  // Check that the value is of an appropriate type. We can skip this part for
  // the Object type because it would not check anything beyond what we already
  // checked with VerifyPointer.
  if (field_type != TypeOracle::GetObjectType()) {
    cc_contents << "    CHECK(" << GenerateRuntimeTypeCheck(field_type, value)
                << ");\n";
  }
}

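// For a strong tagged field "bar" at a hypothetical fixed offset of 16, the
// snippet emitted above reads roughly as follows (the CHECK condition comes
// from GenerateRuntimeTypeCheck and depends on the field's type):
//
//     Object bar__value = TaggedField<Object>::load(o, 16);
//     Object::VerifyPointer(isolate, bar__value);
//     CHECK(bar__value.IsBar(isolate));  // illustrative type check
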
void GenerateClassFieldVerifier(const std::string& class_name,
                                const ClassType& class_type, const Field& f,
                                std::ostream& h_contents,
                                std::ostream& cc_contents) {
  const Type* field_type = f.name_and_type.type;

  // We only verify tagged types, not raw numbers or pointers. Structs
  // consisting of tagged types are also included.
  if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) &&
      !field_type->StructSupertype())
    return;
  if (field_type == TypeOracle::GetFloat64OrHoleType()) return;
  // Do not verify if the field may be uninitialized.
  if (TypeOracle::GetUninitializedType()->IsSubtypeOf(field_type)) return;

  std::string field_start_offset;
  if (f.index) {
    field_start_offset = f.name_and_type.name + "__offset";
    std::string length = f.name_and_type.name + "__length";
    cc_contents << "  intptr_t " << field_start_offset << ", " << length
                << ";\n";
    cc_contents << "  std::tie(std::ignore, " << field_start_offset << ", "
                << length << ") = "
                << Callable::PrefixNameForCCOutput(
                       class_type.GetSliceMacroName(f))
                << "(o);\n";

    // Slices use intptr, but TaggedField<T>.load() uses int, so verify that
    // such a cast is valid.
    cc_contents << "  CHECK_EQ(" << field_start_offset << ", static_cast<int>("
                << field_start_offset << "));\n";
    cc_contents << "  CHECK_EQ(" << length << ", static_cast<int>(" << length
                << "));\n";
    field_start_offset = "static_cast<int>(" + field_start_offset + ")";
    length = "static_cast<int>(" + length + ")";

    cc_contents << "  for (int i = 0; i < " << length << "; ++i) {\n";
  } else {
    // Non-indexed fields have known offsets.
    field_start_offset = std::to_string(*f.offset);
    cc_contents << "  {\n";
  }

  if (auto struct_type = field_type->StructSupertype()) {
    for (const Field& struct_field : (*struct_type)->fields()) {
      if (struct_field.name_and_type.type->IsSubtypeOf(
              TypeOracle::GetTaggedType())) {
        GenerateFieldValueVerifier(
            class_name, f.index.has_value(),
            field_start_offset + " + " + std::to_string(*struct_field.offset),
            struct_field, std::to_string((*struct_type)->PackedSize()),
            cc_contents, f.name_and_type.name == "map");
      }
    }
  } else {
    GenerateFieldValueVerifier(class_name, f.index.has_value(),
                               field_start_offset, f, "kTaggedSize",
                               cc_contents, f.name_and_type.name == "map");
  }

  cc_contents << "  }\n";
}

}  // namespace

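// For an indexed field "entries" of a hypothetical class "Foo", the emitted
// wrapper looks roughly like this (the slice-macro name is generated and
// shown here illustratively):
//
//   intptr_t entries__offset, entries__length;
//   std::tie(std::ignore, entries__offset, entries__length) =
//       FieldSliceFooEntries(o);
//   CHECK_EQ(entries__offset, static_cast<int>(entries__offset));
//   CHECK_EQ(entries__length, static_cast<int>(entries__length));
//   for (int i = 0; i < static_cast<int>(entries__length); ++i) {
//     Object entries__value = TaggedField<Object>::load(
//         o, static_cast<int>(entries__offset) + i * kTaggedSize);
//     Object::VerifyPointer(isolate, entries__value);
//     ...
//   }
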
void ImplementationVisitor::GenerateClassVerifiers(
    const std::string& output_directory) {
  std::string file_name = "class-verifiers";
  std::stringstream h_contents;
  std::stringstream cc_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");
    IfDefScope verify_heap_h(h_contents, "VERIFY_HEAP");
    IfDefScope verify_heap_cc(cc_contents, "VERIFY_HEAP");

    h_contents << "#include \"src/base/macros.h\"\n\n";

    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n\n";
    cc_contents << "#include \"src/objects/all-objects-inl.h\"\n";

    IncludeObjectMacrosScope object_macros(cc_contents);

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});

    cc_contents
        << "#include \"torque-generated/test/torque/test-torque-tq-inl.inc\"\n";

    // Generate forward declarations to avoid including any headers.
    h_contents << "class Isolate;\n";
    for (const ClassType* type : TypeOracle::GetClasses()) {
      if (!type->ShouldGenerateVerify()) continue;
      h_contents << "class " << type->name() << ";\n";
    }

    const char* verifier_class = "TorqueGeneratedClassVerifiers";

    h_contents << "class V8_EXPORT_PRIVATE " << verifier_class << "{\n";
    h_contents << " public:\n";

    for (const ClassType* type : TypeOracle::GetClasses()) {
      std::string name = type->name();
      if (!type->ShouldGenerateVerify()) continue;

      std::string method_name = name + "Verify";

      h_contents << "  static void " << method_name << "(" << name
                 << " o, Isolate* isolate);\n";

      cc_contents << "void " << verifier_class << "::" << method_name << "("
                  << name << " o, Isolate* isolate) {\n";

      // First, do any verification for the super class. Not all classes have
      // verifiers, so skip to the nearest super class that has one.
      const ClassType* super_type = type->GetSuperClass();
      while (super_type && !super_type->ShouldGenerateVerify()) {
        super_type = super_type->GetSuperClass();
      }
      if (super_type) {
        std::string super_name = super_type->name();
        cc_contents << "  o." << super_name << "Verify(isolate);\n";
      }

      // Second, verify that this object is what it claims to be.
      cc_contents << "  CHECK(o.Is" << name << "(isolate));\n";

      // Third, verify its properties.
      for (auto f : type->fields()) {
        GenerateClassFieldVerifier(name, *type, f, h_contents, cc_contents);
      }

      cc_contents << "}\n";
    }

    h_contents << "};\n";
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

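// A generated verifier for a hypothetical class "Foo" whose nearest verified
// super class is HeapObject would look roughly like this (illustrative):
//
//   void TorqueGeneratedClassVerifiers::FooVerify(Foo o, Isolate* isolate) {
//     o.HeapObjectVerify(isolate);
//     CHECK(o.IsFoo(isolate));
//     {
//       Object bar__value = TaggedField<Object>::load(o, 16);
//       Object::VerifyPointer(isolate, bar__value);
//       ...
//     }
//   }
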
void ImplementationVisitor::GenerateEnumVerifiers(
    const std::string& output_directory) {
  std::string file_name = "enum-verifiers";
  std::stringstream cc_contents;
  {
    cc_contents << "#include \"src/compiler/code-assembler.h\"\n";
    for (const std::string& include_path : GlobalContext::CppIncludes()) {
      cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
    }
    cc_contents << "\n";

    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal", ""});

    cc_contents << "class EnumVerifier {\n";
    for (const auto& desc : GlobalContext::Get().ast()->EnumDescriptions()) {
      cc_contents << "  // " << desc.name << " (" << desc.pos << ")\n";
      cc_contents << "  void VerifyEnum_" << desc.name << "("
                  << desc.constexpr_generates
                  << " x) {\n"
                     "    switch(x) {\n";
      for (const auto& entry : desc.entries) {
        cc_contents << "      case " << entry << ": break;\n";
      }
      if (desc.is_open) cc_contents << "      default: break;\n";
      cc_contents << "    }\n  }\n\n";
    }
    cc_contents << "};\n";
  }

  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

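// The generated verifier methods are never called; they exist so the C++
// compiler checks the switch. A closed enum omits the "default:" case, so a
// mismatch between the Torque declaration and the C++ enum surfaces as a
// missing-case warning. Sketch for a hypothetical enum (illustrative names
// and position):
//
//   // MyEnum (src/my-file.tq:12:3)
//   void VerifyEnum_MyEnum(MyEnum x) {
//     switch(x) {
//       case MyEnum::kFoo: break;
//       case MyEnum::kBar: break;
//     }
//   }
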
void ImplementationVisitor::GenerateExportedMacrosAssembler(
    const std::string& output_directory) {
  std::string file_name = "exported-macros-assembler";
  std::stringstream h_contents;
  std::stringstream cc_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");

    h_contents << "#include \"src/compiler/code-assembler.h\"\n";
    h_contents << "#include \"src/execution/frames.h\"\n";
    h_contents << "#include \"torque-generated/csa-types.h\"\n";

    for (const std::string& include_path : GlobalContext::CppIncludes()) {
      cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
    }
    cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";

    for (SourceId file : SourceFileMap::AllSources()) {
      cc_contents << "#include \"torque-generated/" +
                         SourceFileMap::PathFromV8RootWithoutExtension(file) +
                         "-tq-csa.h\"\n";
    }

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
    NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});

    h_contents << "class V8_EXPORT_PRIVATE "
                  "TorqueGeneratedExportedMacrosAssembler {\n"
               << " public:\n"
               << "  explicit TorqueGeneratedExportedMacrosAssembler"
                  "(compiler::CodeAssemblerState* state) : state_(state) {\n"
               << "    USE(state_);\n"
               << "  }\n";

    for (auto& declarable : GlobalContext::AllDeclarables()) {
      TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
      if (!(macro && macro->IsExportedToCSA())) continue;
      CurrentSourcePosition::Scope position_activator(macro->Position());

      cpp::Class assembler("TorqueGeneratedExportedMacrosAssembler");
      std::vector<std::string> generated_parameter_names;
      cpp::Function f = GenerateFunction(
          &assembler, macro->ReadableName(), macro->signature(),
          macro->parameter_names(), false, &generated_parameter_names);

      f.PrintDeclaration(h_contents);
      f.PrintDefinition(cc_contents, [&](std::ostream& stream) {
        stream << "return " << macro->ExternalName() << "(state_";
        for (const auto& name : generated_parameter_names) {
          stream << ", " << name;
        }
        stream << ");";
      });
    }

    h_contents << " private:\n"
               << "  compiler::CodeAssemblerState* state_;\n"
               << "};\n";
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
  WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
}

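// Each exported macro becomes a thin member function that forwards to the
// generated CSA function, passing the stored CodeAssemblerState first. Sketch
// for a hypothetical exported macro "FooMacro" (illustrative; the external
// name and signature are generated):
//
//   TNode<Object> TorqueGeneratedExportedMacrosAssembler::FooMacro(
//       TNode<Context> context) {
//     return FooMacro_0(state_, context);
//   }
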
namespace {

void CollectAllFields(const std::string& path, const Field& field,
                      std::vector<std::string>& result) {
  if (field.name_and_type.type->StructSupertype()) {
    std::string next_path = path + field.name_and_type.name + ".";
    const StructType* struct_type =
        StructType::DynamicCast(field.name_and_type.type);
    for (const auto& inner_field : struct_type->fields()) {
      CollectAllFields(next_path, inner_field, result);
    }
  } else {
    result.push_back(path + field.name_and_type.name);
  }
}

}  // namespace

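// Example (illustrative): a field "s" whose struct type has a member "a" and
// a nested struct member "b" with field "c" flattens to the paths
// {"s.a", "s.b.c"}, which the generated Flatten() below returns as a tuple.
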
void ImplementationVisitor::GenerateCSATypes(
    const std::string& output_directory) {
  std::string file_name = "csa-types";
  std::stringstream h_contents;
  {
    IncludeGuardScope include_guard(h_contents, file_name + ".h");
    h_contents << "#include \"src/compiler/code-assembler.h\"\n\n";

    NamespaceScope h_namespaces(h_contents, {"v8", "internal"});

    // Generate headers for all structs in a topologically-sorted order, since
    // TypeOracle keeps them in the order of their resolution.
    for (const auto& type : TypeOracle::GetAggregateTypes()) {
      const StructType* struct_type = StructType::DynamicCast(type.get());
      if (!struct_type) continue;
      h_contents << "struct " << struct_type->GetGeneratedTypeNameImpl()
                 << " {\n";
      for (auto& field : struct_type->fields()) {
        h_contents << "  " << field.name_and_type.type->GetGeneratedTypeName();
        h_contents << " " << field.name_and_type.name << ";\n";
      }
      h_contents << "\n  std::tuple<";
      bool first = true;
      for (const Type* lowered_type : LowerType(struct_type)) {
        if (!first) {
          h_contents << ", ";
        }
        first = false;
        h_contents << lowered_type->GetGeneratedTypeName();
      }
      std::vector<std::string> all_fields;
      for (auto& field : struct_type->fields()) {
        CollectAllFields("", field, all_fields);
      }
      h_contents << "> Flatten() const {\n"
                    "    return std::make_tuple(";
      PrintCommaSeparatedList(h_contents, all_fields);
      h_contents << ");\n";
      h_contents << "  }\n";
      h_contents << "};\n";
    }
  }
  WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
}

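// A generated struct for a hypothetical two-field Torque struct "Pair" would
// look roughly like this (illustrative type and struct names):
//
//   struct TorqueStructPair {
//     TNode<Object> first;
//     TNode<Object> second;
//
//     std::tuple<TNode<Object>, TNode<Object>> Flatten() const {
//       return std::make_tuple(first, second);
//     }
//   };
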
void ReportAllUnusedMacros() {
  for (const auto& declarable : GlobalContext::AllDeclarables()) {
    if (!declarable->IsMacro() || declarable->IsExternMacro()) continue;

    Macro* macro = Macro::cast(declarable.get());
    if (macro->IsUsed()) continue;

    if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
      continue;
    }
    // TODO(gsps): Mark methods of generic structs used if they are used in any
    // instantiation
    if (Method* method = Method::DynamicCast(macro)) {
      if (StructType* struct_type =
              StructType::DynamicCast(method->aggregate_type())) {
        if (struct_type->GetSpecializedFrom().has_value()) {
          continue;
        }
      }
    }

    std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
                                                 "FromConstexpr<"};
    const std::string name = macro->ReadableName();
    const bool ignore =
        StartsWithSingleUnderscore(name) ||
        std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
                    [&name](const std::string& prefix) {
                      return StringStartsWith(name, prefix);
                    });

    if (!ignore) {
      Lint("Macro '", macro->ReadableName(), "' is never used.")
          .Position(macro->IdentifierPosition());
    }
  }
}

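// An unused macro that survives the filters above produces a lint diagnostic
// at its identifier position, e.g. (illustrative name):
//   Macro 'FooHelper' is never used.
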
}  // namespace torque
}  // namespace internal
}  // namespace v8