//! Defines database & queries for macro expansion.

use base_db::{salsa, Edition, SourceDatabase};
use either::Either;
use limit::Limit;
use mbe::syntax_node_to_token_tree;
use rustc_hash::FxHashSet;
use syntax::{
    ast::{self, HasAttrs, HasDocComments},
    AstNode, GreenNode, Parse, SyntaxError, SyntaxNode, SyntaxToken, T,
};
use triomphe::Arc;

use crate::{
    ast_id_map::AstIdMap, builtin_attr_macro::pseudo_derive_attr_expansion,
    builtin_fn_macro::EagerExpander, fixup, hygiene::HygieneFrame, tt, BuiltinAttrExpander,
    BuiltinDeriveExpander, BuiltinFnLikeExpander, EagerCallInfo, ExpandError, ExpandResult,
    ExpandTo, HirFileId, HirFileIdRepr, MacroCallId, MacroCallKind, MacroCallLoc, MacroDefId,
    MacroDefKind, MacroFile, ProcMacroExpander,
};

/// Total limit on the number of tokens produced by any macro invocation.
///
/// If an invocation produces more tokens than this limit, it will not be stored in the database and
/// an error will be emitted.
///
/// Actual max for `analysis-stats .` at some point: 30672.
static TOKEN_LIMIT: Limit = Limit::new(1_048_576);
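// A minimal, illustrative sketch of how this limit is consulted in `macro_expand`
// below; `Limit::check` returns `Err(())` once the token count exceeds the maximum:
//
//     let count = tt.count();
//     if TOKEN_LIMIT.check(count).is_err() {
//         // bail out with an `ExpandError` instead of caching the oversized tree
//     }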

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TokenExpander {
    /// Old-style `macro_rules` or the new macros 2.0
    DeclarativeMacro { mac: mbe::DeclarativeMacro, def_site_token_map: mbe::TokenMap },
    /// Stuff like `line!` and `file!`.
    Builtin(BuiltinFnLikeExpander),
    /// Built-in eagerly expanded fn-like macros (`include!`, `concat!`, etc.)
    BuiltinEager(EagerExpander),
    /// `global_allocator` and such.
    BuiltinAttr(BuiltinAttrExpander),
    /// `derive(Copy)` and such.
    BuiltinDerive(BuiltinDeriveExpander),
    /// The thing we love the most here in rust-analyzer -- procedural macros.
    ProcMacro(ProcMacroExpander),
}

impl TokenExpander {
    fn expand(
        &self,
        db: &dyn ExpandDatabase,
        id: MacroCallId,
        tt: &tt::Subtree,
    ) -> ExpandResult<tt::Subtree> {
        match self {
            TokenExpander::DeclarativeMacro { mac, .. } => mac.expand(tt).map_err(Into::into),
            TokenExpander::Builtin(it) => it.expand(db, id, tt).map_err(Into::into),
            TokenExpander::BuiltinEager(it) => it.expand(db, id, tt).map_err(Into::into),
            TokenExpander::BuiltinAttr(it) => it.expand(db, id, tt),
            TokenExpander::BuiltinDerive(it) => it.expand(db, id, tt),
            TokenExpander::ProcMacro(_) => {
                // We store the result in the salsa db to prevent non-deterministic behavior in
                // some proc-macro implementations.
                // See #4315 for details.
                db.expand_proc_macro(id)
            }
        }
    }

    pub(crate) fn map_id_down(&self, id: tt::TokenId) -> tt::TokenId {
        match self {
            TokenExpander::DeclarativeMacro { mac, .. } => mac.map_id_down(id),
            TokenExpander::Builtin(..)
            | TokenExpander::BuiltinEager(..)
            | TokenExpander::BuiltinAttr(..)
            | TokenExpander::BuiltinDerive(..)
            | TokenExpander::ProcMacro(..) => id,
        }
    }

    pub(crate) fn map_id_up(&self, id: tt::TokenId) -> (tt::TokenId, mbe::Origin) {
        match self {
            TokenExpander::DeclarativeMacro { mac, .. } => mac.map_id_up(id),
            TokenExpander::Builtin(..)
            | TokenExpander::BuiltinEager(..)
            | TokenExpander::BuiltinAttr(..)
            | TokenExpander::BuiltinDerive(..)
            | TokenExpander::ProcMacro(..) => (id, mbe::Origin::Call),
        }
    }
}
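
// Note on `map_id_down`/`map_id_up` above: only declarative macros introduce
// definition-site token ids of their own, so they are the only variant with a
// real mapping; every other expander passes the call-site id through unchanged
// (hence `mbe::Origin::Call`).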

#[salsa::query_group(ExpandDatabaseStorage)]
pub trait ExpandDatabase: SourceDatabase {
    fn ast_id_map(&self, file_id: HirFileId) -> Arc<AstIdMap>;

    /// Main public API -- parses a hir file, not caring whether it's a real
    /// file or a macro expansion.
    #[salsa::transparent]
    fn parse_or_expand(&self, file_id: HirFileId) -> SyntaxNode;
    #[salsa::transparent]
    fn parse_or_expand_with_err(&self, file_id: HirFileId) -> ExpandResult<Parse<SyntaxNode>>;
    /// Implementation for the macro case.
    // This query is LRU cached
    fn parse_macro_expansion(
        &self,
        macro_file: MacroFile,
    ) -> ExpandResult<(Parse<SyntaxNode>, Arc<mbe::TokenMap>)>;

    /// Macro ids. That's probably the trickiest bit in rust-analyzer, and the
    /// reason why we use salsa at all.
    ///
    /// We encode macro definitions into ids of macro calls; this is what allows
    /// us to be incremental.
    #[salsa::interned]
    fn intern_macro_call(&self, macro_call: MacroCallLoc) -> MacroCallId;
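    // Illustrative sketch (not a query): interning gives a stable id for a
    // `MacroCallLoc`, and the salsa-generated `lookup_intern_macro_call`
    // recovers the location on demand, e.g.
    //
    //     let id = db.intern_macro_call(loc.clone());
    //     let loc_again = db.lookup_intern_macro_call(id);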

    /// Lowers a syntactic macro call to a token tree representation.
    #[salsa::transparent]
    fn macro_arg(
        &self,
        id: MacroCallId,
    ) -> Option<Arc<(tt::Subtree, mbe::TokenMap, fixup::SyntaxFixupUndoInfo)>>;
    /// Extracts the syntax node corresponding to a macro call. This is a firewall
    /// query: only typing inside the macro call itself changes the returned
    /// subtree.
    fn macro_arg_text(&self, id: MacroCallId) -> Option<GreenNode>;
    /// Gets the expander for this macro. This compiles declarative macros and
    /// just fetches procedural ones.
    fn macro_def(&self, id: MacroDefId) -> Result<Arc<TokenExpander>, mbe::ParseError>;

    /// Expand macro call to a token tree.
    // This query is LRU cached
    fn macro_expand(&self, macro_call: MacroCallId) -> ExpandResult<Arc<tt::Subtree>>;
    #[salsa::invoke(crate::builtin_fn_macro::include_arg_to_tt)]
    fn include_expand(
        &self,
        arg_id: MacroCallId,
    ) -> Result<
        (triomphe::Arc<(::tt::Subtree<::tt::TokenId>, mbe::TokenMap)>, base_db::FileId),
        ExpandError,
    >;
    /// Special case of the previous query for procedural macros. We can't LRU
    /// proc macros, since they are not deterministic in general, and
    /// non-determinism breaks salsa in a very, very, very bad way.
    /// @edwin0cheng heroically debugged this once!
    fn expand_proc_macro(&self, call: MacroCallId) -> ExpandResult<tt::Subtree>;
    /// Firewall query that returns the errors from the `parse_macro_expansion` query.
    fn parse_macro_expansion_error(
        &self,
        macro_call: MacroCallId,
    ) -> ExpandResult<Box<[SyntaxError]>>;

    fn hygiene_frame(&self, file_id: HirFileId) -> Arc<HygieneFrame>;
}
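
// Illustrative, non-normative sketch of the main entry point above: callers
// treat real files and macro expansions uniformly through `HirFileId` (the
// `hir_file_id` name below is made up for the example).
//
//     let syntax: SyntaxNode = db.parse_or_expand(hir_file_id);
//     let ids: Arc<AstIdMap> = db.ast_id_map(hir_file_id);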

/// This expands the given macro call, but with different arguments. This is
/// used for completion, where we want to see what 'would happen' if we insert a
/// token. The `token_to_map` is mapped down into the expansion, and the mapped
/// token is returned.
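///
/// An illustrative, non-compiled sketch of the call shape (the `db`, `call_id`,
/// `args_node`, and `cursor_token` names are made up for this example):
///
/// ```ignore
/// let (expanded, mapped_token) =
///     expand_speculative(db, call_id, &args_node, cursor_token)?;
/// // `mapped_token` is the counterpart of `cursor_token` inside `expanded`.
/// ```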
pub fn expand_speculative(
    db: &dyn ExpandDatabase,
    actual_macro_call: MacroCallId,
    speculative_args: &SyntaxNode,
    token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
    let loc = db.lookup_intern_macro_call(actual_macro_call);
    let macro_def = db.macro_def(loc.def).ok()?;
    let token_range = token_to_map.text_range();

    // Build the subtree and token mapping for the speculative args
    let censor = censor_for_macro_input(&loc, speculative_args);
    let mut fixups = fixup::fixup_syntax(speculative_args);
    fixups.replace.extend(censor.into_iter().map(|node| (node.into(), Vec::new())));
    let (mut tt, spec_args_tmap, _) = mbe::syntax_node_to_token_tree_with_modifications(
        speculative_args,
        fixups.token_map,
        fixups.next_id,
        fixups.replace,
        fixups.append,
    );

    let (attr_arg, token_id) = match loc.kind {
        MacroCallKind::Attr { invoc_attr_index, .. } => {
            let attr = if loc.def.is_attribute_derive() {
                // For pseudo-derive expansion we only pass the attribute itself.
                ast::Attr::cast(speculative_args.clone())
            } else {
                // Attributes may have an input token tree; build the subtree and map for this as
                // well, then try to find a token id for our token if it is inside this input subtree.
                let item = ast::Item::cast(speculative_args.clone())?;
                item.doc_comments_and_attrs()
                    .nth(invoc_attr_index.ast_index())
                    .and_then(Either::left)
            }?;
            match attr.token_tree() {
                Some(token_tree) => {
                    let (mut tree, map) = syntax_node_to_token_tree(attr.token_tree()?.syntax());
                    tree.delimiter = tt::Delimiter::unspecified();

                    let shift = mbe::Shift::new(&tt);
                    shift.shift_all(&mut tree);

                    let token_id = if token_tree.syntax().text_range().contains_range(token_range) {
                        let attr_input_start =
                            token_tree.left_delimiter_token()?.text_range().start();
                        let range = token_range.checked_sub(attr_input_start)?;
                        let token_id = shift.shift(map.token_by_range(range)?);
                        Some(token_id)
                    } else {
                        None
                    };
                    (Some(tree), token_id)
                }
                _ => (None, None),
            }
        }
        _ => (None, None),
    };
    let token_id = match token_id {
        Some(token_id) => token_id,
        // token wasn't inside an attribute input so it has to be in the general macro input
        None => {
            let range = token_range.checked_sub(speculative_args.text_range().start())?;
            let token_id = spec_args_tmap.token_by_range(range)?;
            macro_def.map_id_down(token_id)
        }
    };

    // Do the actual expansion. We need to directly expand the proc macro here because of the
    // attribute args; otherwise the expand query would fetch the non-speculative attribute args
    // and pass those instead.
    let mut speculative_expansion = match loc.def.kind {
        MacroDefKind::ProcMacro(expander, ..) => {
            tt.delimiter = tt::Delimiter::unspecified();
            expander.expand(db, loc.def.krate, loc.krate, &tt, attr_arg.as_ref())
        }
        MacroDefKind::BuiltInAttr(BuiltinAttrExpander::Derive, _) => {
            pseudo_derive_attr_expansion(&tt, attr_arg.as_ref()?)
        }
        _ => macro_def.expand(db, actual_macro_call, &tt),
    };

    let expand_to = macro_expand_to(db, actual_macro_call);
    fixup::reverse_fixups(&mut speculative_expansion.value, &spec_args_tmap, &fixups.undo_info);
    let (node, rev_tmap) = token_tree_to_syntax_node(&speculative_expansion.value, expand_to);

    let syntax_node = node.syntax_node();
    let token = rev_tmap
        .ranges_by_token(token_id, token_to_map.kind())
        .filter_map(|range| syntax_node.covering_element(range).into_token())
        .min_by_key(|t| {
            // prefer tokens of the same kind and text
            // Note the inversion of the score here, as we want to prefer the first token in case
            // of all tokens having the same score
            (t.kind() != token_to_map.kind()) as u8 + (t.text() != token_to_map.text()) as u8
        })?;
    Some((node.syntax_node(), token))
}

fn ast_id_map(db: &dyn ExpandDatabase, file_id: HirFileId) -> Arc<AstIdMap> {
    Arc::new(AstIdMap::from_source(&db.parse_or_expand(file_id)))
}

fn parse_or_expand(db: &dyn ExpandDatabase, file_id: HirFileId) -> SyntaxNode {
    match file_id.repr() {
        HirFileIdRepr::FileId(file_id) => db.parse(file_id).tree().syntax().clone(),
        HirFileIdRepr::MacroFile(macro_file) => {
            db.parse_macro_expansion(macro_file).value.0.syntax_node()
        }
    }
}

fn parse_or_expand_with_err(
    db: &dyn ExpandDatabase,
    file_id: HirFileId,
) -> ExpandResult<Parse<SyntaxNode>> {
    match file_id.repr() {
        HirFileIdRepr::FileId(file_id) => ExpandResult::ok(db.parse(file_id).to_syntax()),
        HirFileIdRepr::MacroFile(macro_file) => {
            db.parse_macro_expansion(macro_file).map(|(it, _)| it)
        }
    }
}

fn parse_macro_expansion(
    db: &dyn ExpandDatabase,
    macro_file: MacroFile,
) -> ExpandResult<(Parse<SyntaxNode>, Arc<mbe::TokenMap>)> {
    let _p = profile::span("parse_macro_expansion");
    let mbe::ValueResult { value: tt, err } = db.macro_expand(macro_file.macro_call_id);

    let expand_to = macro_expand_to(db, macro_file.macro_call_id);

    tracing::debug!("expanded = {}", tt.as_debug_string());
    tracing::debug!("kind = {:?}", expand_to);

    let (parse, rev_token_map) = token_tree_to_syntax_node(&tt, expand_to);

    ExpandResult { value: (parse, Arc::new(rev_token_map)), err }
}

fn macro_arg(
    db: &dyn ExpandDatabase,
    id: MacroCallId,
) -> Option<Arc<(tt::Subtree, mbe::TokenMap, fixup::SyntaxFixupUndoInfo)>> {
    let loc = db.lookup_intern_macro_call(id);

    if let Some(EagerCallInfo { arg, arg_id: Some(_), error: _ }) = loc.eager.as_deref() {
        return Some(Arc::new((arg.0.clone(), arg.1.clone(), Default::default())));
    }

    let arg = db.macro_arg_text(id)?;

    let node = SyntaxNode::new_root(arg);
    let censor = censor_for_macro_input(&loc, &node);
    let mut fixups = fixup::fixup_syntax(&node);
    fixups.replace.extend(censor.into_iter().map(|node| (node.into(), Vec::new())));
    let (mut tt, tmap, _) = mbe::syntax_node_to_token_tree_with_modifications(
        &node,
        fixups.token_map,
        fixups.next_id,
        fixups.replace,
        fixups.append,
    );

    if loc.def.is_proc_macro() {
        // Proc macros expect their inputs without the outer delimiters; MBEs expect them included.
        tt.delimiter = tt::Delimiter::unspecified();
    }
    Some(Arc::new((tt, tmap, fixups.undo_info)))
}

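/// Computes which attributes on the macro input should be hidden ("censored")
/// from the expander so it does not see them in its token stream.
///
/// A rough, illustrative example: when expanding an attribute macro invoked as
/// `#[some_attr] struct S;`, the `#[some_attr]` attribute itself is removed from
/// the item handed to the macro; for derives, the `#[derive(...)]` attributes up
/// to and including the invoking one are removed. Fn-like macros censor nothing.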
fn censor_for_macro_input(loc: &MacroCallLoc, node: &SyntaxNode) -> FxHashSet<SyntaxNode> {
    // FIXME: handle `cfg_attr`
    (|| {
        let censor = match loc.kind {
            MacroCallKind::FnLike { .. } => return None,
            MacroCallKind::Derive { derive_attr_index, .. } => {
                cov_mark::hit!(derive_censoring);
                ast::Item::cast(node.clone())?
                    .attrs()
                    .take(derive_attr_index.ast_index() + 1)
                    // FIXME: this resolution should not be done syntactically.
                    // `derive` is a proper macro now, no longer a builtin.
                    // But we do not have resolution at this stage; this means
                    // we need to know about all macro calls for the given ast
                    // item here, so we require some kind of mapping...
                    .filter(|attr| attr.simple_name().as_deref() == Some("derive"))
                    .map(|it| it.syntax().clone())
                    .collect()
            }
            MacroCallKind::Attr { .. } if loc.def.is_attribute_derive() => return None,
            MacroCallKind::Attr { invoc_attr_index, .. } => {
                cov_mark::hit!(attribute_macro_attr_censoring);
                ast::Item::cast(node.clone())?
                    .doc_comments_and_attrs()
                    .nth(invoc_attr_index.ast_index())
                    .and_then(Either::left)
                    .map(|attr| attr.syntax().clone())
                    .into_iter()
                    .collect()
            }
        };
        Some(censor)
    })()
    .unwrap_or_default()
}

fn macro_arg_text(db: &dyn ExpandDatabase, id: MacroCallId) -> Option<GreenNode> {
    let loc = db.lookup_intern_macro_call(id);
    let arg = loc.kind.arg(db)?;
    if matches!(loc.kind, MacroCallKind::FnLike { .. }) {
        let first = arg.first_child_or_token().map_or(T![.], |it| it.kind());
        let last = arg.last_child_or_token().map_or(T![.], |it| it.kind());
        let well_formed_tt =
            matches!((first, last), (T!['('], T![')']) | (T!['['], T![']']) | (T!['{'], T!['}']));
        if !well_formed_tt {
            // Don't expand malformed (unbalanced) macro invocations. This is
            // less than ideal, but trying to expand unbalanced macro calls
            // sometimes produces pathological, deeply nested code which breaks
            // all kinds of things.
            //
            // Some day, we'll have explicit recursion counters for all
            // recursive things, at which point this code might be removed.
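            //
            // For example (purely illustrative): while the user is still typing,
            // `foo!(bar` has no closing `)` yet, so `first`/`last` are not a
            // matching delimiter pair and we bail out instead of expanding.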
            cov_mark::hit!(issue9358_bad_macro_stack_overflow);
            return None;
        }
    }
    if let Some(EagerCallInfo { arg, .. }) = loc.eager.as_deref() {
        Some(
            mbe::token_tree_to_syntax_node(&arg.0, mbe::TopEntryPoint::Expr)
                .0
                .syntax_node()
                .green()
                .into(),
        )
    } else {
        Some(arg.green().into())
    }
}

fn macro_def(
    db: &dyn ExpandDatabase,
    id: MacroDefId,
) -> Result<Arc<TokenExpander>, mbe::ParseError> {
    match id.kind {
        MacroDefKind::Declarative(ast_id) => {
            let is_2021 = db.crate_graph()[id.krate].edition >= Edition::Edition2021;
            let (mac, def_site_token_map) = match ast_id.to_node(db) {
                ast::Macro::MacroRules(macro_rules) => {
                    let arg = macro_rules
                        .token_tree()
                        .ok_or_else(|| mbe::ParseError::Expected("expected a token tree".into()))?;
                    let (tt, def_site_token_map) = mbe::syntax_node_to_token_tree(arg.syntax());
                    let mac = mbe::DeclarativeMacro::parse_macro_rules(&tt, is_2021)?;
                    (mac, def_site_token_map)
                }
                ast::Macro::MacroDef(macro_def) => {
                    let arg = macro_def
                        .body()
                        .ok_or_else(|| mbe::ParseError::Expected("expected a token tree".into()))?;
                    let (tt, def_site_token_map) = mbe::syntax_node_to_token_tree(arg.syntax());
                    let mac = mbe::DeclarativeMacro::parse_macro2(&tt, is_2021)?;
                    (mac, def_site_token_map)
                }
            };
            Ok(Arc::new(TokenExpander::DeclarativeMacro { mac, def_site_token_map }))
        }
        MacroDefKind::BuiltIn(expander, _) => Ok(Arc::new(TokenExpander::Builtin(expander))),
        MacroDefKind::BuiltInAttr(expander, _) => {
            Ok(Arc::new(TokenExpander::BuiltinAttr(expander)))
        }
        MacroDefKind::BuiltInDerive(expander, _) => {
            Ok(Arc::new(TokenExpander::BuiltinDerive(expander)))
        }
        MacroDefKind::BuiltInEager(expander, ..) => {
            Ok(Arc::new(TokenExpander::BuiltinEager(expander)))
        }
        MacroDefKind::ProcMacro(expander, ..) => Ok(Arc::new(TokenExpander::ProcMacro(expander))),
    }
}

fn macro_expand(db: &dyn ExpandDatabase, id: MacroCallId) -> ExpandResult<Arc<tt::Subtree>> {
    let _p = profile::span("macro_expand");
    let loc = db.lookup_intern_macro_call(id);
    if let Some(EagerCallInfo { arg, arg_id: None, error }) = loc.eager.as_deref() {
        // This is an input expansion for an eager macro. These are already pre-expanded.
        return ExpandResult { value: Arc::new(arg.0.clone()), err: error.clone() };
    }
    let expander = match db.macro_def(loc.def) {
        Ok(it) => it,
        // FIXME: We should make sure to enforce an invariant that invalid macro
        // definitions do not get expanders that could reach this call path!
        Err(err) => {
            return ExpandResult {
                value: Arc::new(tt::Subtree {
                    delimiter: tt::Delimiter::UNSPECIFIED,
                    token_trees: vec![],
                }),
                err: Some(ExpandError::other(format!("invalid macro definition: {err}"))),
            }
        }
    };
    let Some(macro_arg) = db.macro_arg(id) else {
        return ExpandResult {
            value: Arc::new(tt::Subtree {
                delimiter: tt::Delimiter::UNSPECIFIED,
                token_trees: Vec::new(),
            }),
            // FIXME: We should make sure to enforce an invariant that invalid macro
            // calls do not reach this call path!
            err: Some(ExpandError::other("invalid token tree")),
        };
    };
    let (arg_tt, arg_tm, undo_info) = &*macro_arg;
    let ExpandResult { value: mut tt, mut err } = expander.expand(db, id, arg_tt);

    if let Some(EagerCallInfo { error, .. }) = loc.eager.as_deref() {
        // FIXME: We should report both errors!
        err = error.clone().or(err);
    }

    // Set a hard limit for the expanded tt
    let count = tt.count();
    if TOKEN_LIMIT.check(count).is_err() {
        return ExpandResult {
            value: Arc::new(tt::Subtree {
                delimiter: tt::Delimiter::UNSPECIFIED,
                token_trees: vec![],
            }),
            err: Some(ExpandError::other(format!(
                "macro invocation exceeds token limit: produced {} tokens, limit is {}",
                count,
                TOKEN_LIMIT.inner(),
            ))),
        };
    }

    fixup::reverse_fixups(&mut tt, arg_tm, undo_info);

    ExpandResult { value: Arc::new(tt), err }
}

fn parse_macro_expansion_error(
    db: &dyn ExpandDatabase,
    macro_call_id: MacroCallId,
) -> ExpandResult<Box<[SyntaxError]>> {
    db.parse_macro_expansion(MacroFile { macro_call_id })
        .map(|it| it.0.errors().to_vec().into_boxed_slice())
}

fn expand_proc_macro(db: &dyn ExpandDatabase, id: MacroCallId) -> ExpandResult<tt::Subtree> {
    let loc = db.lookup_intern_macro_call(id);
    let Some(macro_arg) = db.macro_arg(id) else {
        return ExpandResult {
            value: tt::Subtree {
                delimiter: tt::Delimiter::UNSPECIFIED,
                token_trees: Vec::new(),
            },
            err: Some(ExpandError::other("invalid token tree")),
        };
    };

    let expander = match loc.def.kind {
        MacroDefKind::ProcMacro(expander, ..) => expander,
        _ => unreachable!(),
    };

    let attr_arg = match &loc.kind {
        MacroCallKind::Attr { attr_args, .. } => {
            let mut attr_args = attr_args.0.clone();
            mbe::Shift::new(&macro_arg.0).shift_all(&mut attr_args);
            Some(attr_args)
        }
        _ => None,
    };

    expander.expand(db, loc.def.krate, loc.krate, &macro_arg.0, attr_arg.as_ref())
}

fn hygiene_frame(db: &dyn ExpandDatabase, file_id: HirFileId) -> Arc<HygieneFrame> {
    Arc::new(HygieneFrame::new(db, file_id))
}

fn macro_expand_to(db: &dyn ExpandDatabase, id: MacroCallId) -> ExpandTo {
    db.lookup_intern_macro_call(id).expand_to()
}

fn token_tree_to_syntax_node(
    tt: &tt::Subtree,
    expand_to: ExpandTo,
) -> (Parse<SyntaxNode>, mbe::TokenMap) {
    let entry_point = match expand_to {
        ExpandTo::Statements => mbe::TopEntryPoint::MacroStmts,
        ExpandTo::Items => mbe::TopEntryPoint::MacroItems,
        ExpandTo::Pattern => mbe::TopEntryPoint::Pattern,
        ExpandTo::Type => mbe::TopEntryPoint::Type,
        ExpandTo::Expr => mbe::TopEntryPoint::Expr,
    };
    mbe::token_tree_to_syntax_node(tt, entry_point)
}