1 //! The main loop of `rust-analyzer` responsible for dispatching LSP
2 //! requests/replies and notifications back to the client.
3 use std::{
4 fmt,
5 time::{Duration, Instant},
6 };
7
8 use always_assert::always;
9 use crossbeam_channel::{select, Receiver};
10 use flycheck::FlycheckHandle;
11 use ide_db::base_db::{SourceDatabaseExt, VfsPath};
12 use lsp_server::{Connection, Notification, Request};
13 use lsp_types::notification::Notification as _;
14 use stdx::thread::ThreadIntent;
15 use triomphe::Arc;
16 use vfs::FileId;
17
18 use crate::{
19 config::Config,
20 dispatch::{NotificationDispatcher, RequestDispatcher},
21 from_proto,
22 global_state::{file_id_to_url, url_to_file_id, GlobalState},
23 lsp_ext,
24 lsp_utils::{notification_is, Progress},
25 reload::{BuildDataProgress, ProcMacroProgress, ProjectWorkspaceProgress},
26 Result,
27 };
28
/// Entry point of the language server: takes ownership of the LSP
/// `connection` and runs the event loop on a fresh [`GlobalState`] until the
/// client sends `exit` (or the connection is dropped).
pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
    tracing::info!("initial config: {:#?}", config);

    // Windows scheduler implements priority boosts: if thread waits for an
    // event (like a condvar), and event fires, priority of the thread is
    // temporary bumped. This optimization backfires in our case: each time the
    // `main_loop` schedules a task to run on a threadpool, the worker threads
    // gets a higher priority, and (on a machine with fewer cores) displaces the
    // main loop! We work around this by marking the main loop as a
    // higher-priority thread.
    //
    // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
    // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
    // https://github.com/rust-lang/rust-analyzer/issues/2835
    #[cfg(windows)]
    // SAFETY: these winapi calls only read and adjust the *current* thread's
    // scheduling priority; they have no other preconditions.
    unsafe {
        use winapi::um::processthreadsapi::*;
        let thread = GetCurrentThread();
        let thread_priority_above_normal = 1;
        SetThreadPriority(thread, thread_priority_above_normal);
    }

    GlobalState::new(connection.sender, config).run(connection.receiver)
}
53
/// A unit of work for the main loop, merged from all of its input channels
/// (see `GlobalState::next_event`).
enum Event {
    /// A message (request/notification/response) from the LSP client.
    Lsp(lsp_server::Message),
    /// A result or progress report from one of the background task pools.
    Task(Task),
    /// File contents / loading progress from the VFS loader.
    Vfs(vfs::loader::Message),
    /// Diagnostics or progress from the flycheck (`cargo check`) workers.
    Flycheck(flycheck::Message),
}
60
/// Work items produced on background threads and sent back to the main loop,
/// which applies them to `GlobalState` (see `GlobalState::handle_task`).
#[derive(Debug)]
pub(crate) enum Task {
    /// A finished LSP request whose response is ready to send to the client.
    Response(lsp_server::Response),
    /// A request that should be dispatched again (skipped if it was cancelled
    /// in the meantime).
    Retry(lsp_server::Request),
    /// Freshly computed native diagnostics, grouped per file.
    Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
    /// Progress of the cache-priming (indexing) job.
    PrimeCaches(PrimeCachesProgress),
    /// Progress of workspace discovery.
    FetchWorkspace(ProjectWorkspaceProgress),
    /// Progress of fetching build-script output.
    FetchBuildData(BuildDataProgress),
    /// Progress of loading proc macros.
    LoadProcMacros(ProcMacroProgress),
}
71
/// Lifecycle of the cache-priming (indexing) background job.
#[derive(Debug)]
pub(crate) enum PrimeCachesProgress {
    Begin,
    /// Periodic report with per-crate indexing counts.
    Report(ide::ParallelPrimeCachesProgress),
    /// The job finished; `cancelled` is true when it was interrupted before
    /// completion (the main loop re-requests a run in that case).
    End { cancelled: bool },
}
78
79 impl fmt::Debug for Event {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result80 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
81 let debug_non_verbose = |not: &Notification, f: &mut fmt::Formatter<'_>| {
82 f.debug_struct("Notification").field("method", ¬.method).finish()
83 };
84
85 match self {
86 Event::Lsp(lsp_server::Message::Notification(not)) => {
87 if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
88 || notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
89 {
90 return debug_non_verbose(not, f);
91 }
92 }
93 Event::Task(Task::Response(resp)) => {
94 return f
95 .debug_struct("Response")
96 .field("id", &resp.id)
97 .field("error", &resp.error)
98 .finish();
99 }
100 _ => (),
101 }
102 match self {
103 Event::Lsp(it) => fmt::Debug::fmt(it, f),
104 Event::Task(it) => fmt::Debug::fmt(it, f),
105 Event::Vfs(it) => fmt::Debug::fmt(it, f),
106 Event::Flycheck(it) => fmt::Debug::fmt(it, f),
107 }
108 }
109 }
110
111 impl GlobalState {
    /// Runs the main loop: performs startup work, then processes events from
    /// `inbox` (and the internal channels) until the client sends `exit`.
    ///
    /// Returns `Ok(())` on a clean `exit` notification; returns an error if
    /// the client disconnects without the shutdown handshake.
    fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
        self.update_status_or_notify();

        if self.config.did_save_text_document_dynamic_registration() {
            self.register_did_save_capability();
        }

        // Kick off the initial workspace discovery before entering the loop.
        self.fetch_workspaces_queue.request_op("startup".to_string(), false);
        if let Some((cause, force_crate_graph_reload)) =
            self.fetch_workspaces_queue.should_start_op()
        {
            self.fetch_workspaces(cause, force_crate_graph_reload);
        }

        while let Some(event) = self.next_event(&inbox) {
            // `exit` terminates the loop; every other event goes through the
            // regular handler.
            if matches!(
                &event,
                Event::Lsp(lsp_server::Message::Notification(Notification { method, .. }))
                if method == lsp_types::notification::Exit::METHOD
            ) {
                return Ok(());
            }
            self.handle_event(event)?;
        }

        Err("client exited without proper shutdown sequence".into())
    }
139
register_did_save_capability(&mut self)140 fn register_did_save_capability(&mut self) {
141 let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
142 include_text: Some(false),
143 text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
144 document_selector: Some(vec![
145 lsp_types::DocumentFilter {
146 language: None,
147 scheme: None,
148 pattern: Some("**/*.rs".into()),
149 },
150 lsp_types::DocumentFilter {
151 language: None,
152 scheme: None,
153 pattern: Some("**/Cargo.toml".into()),
154 },
155 lsp_types::DocumentFilter {
156 language: None,
157 scheme: None,
158 pattern: Some("**/Cargo.lock".into()),
159 },
160 ]),
161 },
162 };
163
164 let registration = lsp_types::Registration {
165 id: "textDocument/didSave".to_string(),
166 method: "textDocument/didSave".to_string(),
167 register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
168 };
169 self.send_request::<lsp_types::request::RegisterCapability>(
170 lsp_types::RegistrationParams { registrations: vec![registration] },
171 |_, _| (),
172 );
173 }
174
    /// Blocks until the next event arrives on any input channel.
    ///
    /// Returns `None` only when the client's channel (`inbox`) is closed. The
    /// other channels are owned by `self` (task pools, VFS loader, flycheck),
    /// so their senders outlive this call — hence the `unwrap`s.
    fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
        select! {
            recv(inbox) -> msg =>
                msg.ok().map(Event::Lsp),

            recv(self.task_pool.receiver) -> task =>
                Some(Event::Task(task.unwrap())),

            recv(self.fmt_pool.receiver) -> task =>
                Some(Event::Task(task.unwrap())),

            recv(self.loader.receiver) -> task =>
                Some(Event::Vfs(task.unwrap())),

            recv(self.flycheck_receiver) -> task =>
                Some(Event::Flycheck(task.unwrap())),
        }
    }
193
    /// One turn of the main loop: applies a single `event` (coalescing any
    /// queued events of the same kind), then performs the follow-up work that
    /// must happen after *every* turn: processing VFS changes, publishing
    /// diagnostics, starting queued operations, and reporting status.
    ///
    /// The ordering of the phases below is significant; be careful when
    /// rearranging them.
    fn handle_event(&mut self, event: Event) -> Result<()> {
        let loop_start = Instant::now();
        // NOTE: don't count blocking select! call as a loop-turn time
        let _p = profile::span("GlobalState::handle_event");

        let event_dbg_msg = format!("{event:?}");
        tracing::debug!("{:?} handle_event({})", loop_start, event_dbg_msg);
        if tracing::enabled!(tracing::Level::INFO) {
            let task_queue_len = self.task_pool.handle.len();
            if task_queue_len > 0 {
                tracing::info!("task queue len: {}", task_queue_len);
            }
        }

        // Remembered so we can detect the quiescent *transition* below.
        let was_quiescent = self.is_quiescent();
        match event {
            Event::Lsp(msg) => match msg {
                lsp_server::Message::Request(req) => self.on_new_request(loop_start, req),
                lsp_server::Message::Notification(not) => self.on_notification(not)?,
                lsp_server::Message::Response(resp) => self.complete_request(resp),
            },
            Event::Task(task) => {
                let _p = profile::span("GlobalState::handle_event/task");
                let mut prime_caches_progress = Vec::new();

                self.handle_task(&mut prime_caches_progress, task);
                // Coalesce multiple task events into one loop turn
                while let Ok(task) = self.task_pool.receiver.try_recv() {
                    self.handle_task(&mut prime_caches_progress, task);
                }

                // Turn the collected prime-caches progress into LSP progress
                // notifications, one per (coalesced) report.
                for progress in prime_caches_progress {
                    let (state, message, fraction);
                    match progress {
                        PrimeCachesProgress::Begin => {
                            state = Progress::Begin;
                            message = None;
                            fraction = 0.0;
                        }
                        PrimeCachesProgress::Report(report) => {
                            state = Progress::Report;

                            message = match &report.crates_currently_indexing[..] {
                                [crate_name] => Some(format!(
                                    "{}/{} ({crate_name})",
                                    report.crates_done, report.crates_total
                                )),
                                [crate_name, rest @ ..] => Some(format!(
                                    "{}/{} ({} + {} more)",
                                    report.crates_done,
                                    report.crates_total,
                                    crate_name,
                                    rest.len()
                                )),
                                _ => None,
                            };

                            fraction = Progress::fraction(report.crates_done, report.crates_total);
                        }
                        PrimeCachesProgress::End { cancelled } => {
                            state = Progress::End;
                            message = None;
                            fraction = 1.0;

                            self.prime_caches_queue.op_completed(());
                            // A cancelled run means the caches are still cold;
                            // queue another run.
                            if cancelled {
                                self.prime_caches_queue
                                    .request_op("restart after cancellation".to_string(), ());
                            }
                        }
                    };

                    self.report_progress("Indexing", state, message, Some(fraction), None);
                }
            }
            Event::Vfs(message) => {
                let _p = profile::span("GlobalState::handle_event/vfs");
                self.handle_vfs_msg(message);
                // Coalesce many VFS event into a single loop turn
                while let Ok(message) = self.loader.receiver.try_recv() {
                    self.handle_vfs_msg(message);
                }
            }
            Event::Flycheck(message) => {
                let _p = profile::span("GlobalState::handle_event/flycheck");
                self.handle_flycheck_msg(message);
                // Coalesce many flycheck updates into a single loop turn
                while let Ok(message) = self.flycheck_receiver.try_recv() {
                    self.handle_flycheck_msg(message);
                }
            }
        }
        let event_handling_duration = loop_start.elapsed();

        // Apply any accumulated VFS changes to the analysis database.
        let state_changed = self.process_changes();
        let memdocs_added_or_removed = self.mem_docs.take_changes();

        if self.is_quiescent() {
            let became_quiescent = !(was_quiescent
                || self.fetch_workspaces_queue.op_requested()
                || self.fetch_build_data_queue.op_requested()
                || self.fetch_proc_macros_queue.op_requested());

            if became_quiescent {
                if self.config.check_on_save() {
                    // Project has loaded properly, kick off initial flycheck
                    self.flycheck.iter().for_each(FlycheckHandle::restart);
                }
                if self.config.prefill_caches() {
                    self.prime_caches_queue.request_op("became quiescent".to_string(), ());
                }
            }

            let client_refresh = !was_quiescent || state_changed;
            if client_refresh {
                // Refresh semantic tokens if the client supports it.
                if self.config.semantic_tokens_refresh() {
                    self.semantic_tokens_cache.lock().clear();
                    self.send_request::<lsp_types::request::SemanticTokensRefresh>((), |_, _| ());
                }

                // Refresh code lens if the client supports it.
                if self.config.code_lens_refresh() {
                    self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
                }

                // Refresh inlay hints if the client supports it.
                if self.config.inlay_hints_refresh() {
                    self.send_request::<lsp_types::request::InlayHintRefreshRequest>((), |_, _| ());
                }
            }

            let update_diagnostics = (!was_quiescent || state_changed || memdocs_added_or_removed)
                && self.config.publish_diagnostics();
            if update_diagnostics {
                self.update_diagnostics()
            }
        }

        // Publish diagnostics for every file whose diagnostics changed.
        if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
            for file_id in diagnostic_changes {
                let uri = file_id_to_url(&self.vfs.read().0, file_id);
                let mut diagnostics =
                    self.diagnostics.diagnostics_for(file_id).cloned().collect::<Vec<_>>();

                // VSCode assumes diagnostic messages to be non-empty strings, so we need to patch
                // empty diagnostics. Neither the docs of VSCode nor the LSP spec say whether
                // diagnostic messages are actually allowed to be empty or not and patching this
                // in the VSCode client does not work as the assertion happens in the protocol
                // conversion. So this hack is here to stay, and will be considered a hack
                // until the LSP decides to state that empty messages are allowed.

                // See https://github.com/rust-lang/rust-analyzer/issues/11404
                // See https://github.com/rust-lang/rust-analyzer/issues/13130
                let patch_empty = |message: &mut String| {
                    if message.is_empty() {
                        *message = " ".to_string();
                    }
                };

                for d in &mut diagnostics {
                    patch_empty(&mut d.message);
                    if let Some(dri) = &mut d.related_information {
                        for dri in dri {
                            patch_empty(&mut dri.message);
                        }
                    }
                }

                let version = from_proto::vfs_path(&uri)
                    .map(|path| self.mem_docs.get(&path).map(|it| it.version))
                    .unwrap_or_default();

                self.send_notification::<lsp_types::notification::PublishDiagnostics>(
                    lsp_types::PublishDiagnosticsParams { uri, diagnostics, version },
                );
            }
        }

        // Start queued long-running operations, respecting their dependencies:
        // workspace fetch precedes build-data / proc-macro loading.
        if self.config.cargo_autoreload() {
            if let Some((cause, force_crate_graph_reload)) =
                self.fetch_workspaces_queue.should_start_op()
            {
                self.fetch_workspaces(cause, force_crate_graph_reload);
            }
        }

        if !self.fetch_workspaces_queue.op_in_progress() {
            if let Some((cause, ())) = self.fetch_build_data_queue.should_start_op() {
                self.fetch_build_data(cause);
            } else if let Some((cause, paths)) = self.fetch_proc_macros_queue.should_start_op() {
                self.fetch_proc_macros(cause, paths);
            }
        }

        if let Some((cause, ())) = self.prime_caches_queue.should_start_op() {
            self.prime_caches(cause);
        }

        self.update_status_or_notify();

        // Long loop turns block user-visible responses; complain loudly so
        // they get reported (only once quiescent, to avoid startup noise).
        let loop_duration = loop_start.elapsed();
        if loop_duration > Duration::from_millis(100) && was_quiescent {
            tracing::warn!("overly long loop turn took {loop_duration:?} (event handling took {event_handling_duration:?}): {event_dbg_msg}");
            self.poke_rust_analyzer_developer(format!(
                "overly long loop turn took {loop_duration:?} (event handling took {event_handling_duration:?}): {event_dbg_msg}"
            ));
        }
        Ok(())
    }
404
prime_caches(&mut self, cause: String)405 fn prime_caches(&mut self, cause: String) {
406 tracing::debug!(%cause, "will prime caches");
407 let num_worker_threads = self.config.prime_caches_num_threads();
408
409 self.task_pool.handle.spawn_with_sender(ThreadIntent::Worker, {
410 let analysis = self.snapshot().analysis;
411 move |sender| {
412 sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
413 let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
414 let report = PrimeCachesProgress::Report(progress);
415 sender.send(Task::PrimeCaches(report)).unwrap();
416 });
417 sender
418 .send(Task::PrimeCaches(PrimeCachesProgress::End { cancelled: res.is_err() }))
419 .unwrap();
420 }
421 });
422 }
423
    /// Reports the current server status to the client, but only when it
    /// changed since the last report.
    ///
    /// Clients supporting the `experimental/serverStatus` extension get the
    /// full status notification; other clients get a plain `showMessage`
    /// for warning/error states only.
    fn update_status_or_notify(&mut self) {
        let status = self.current_status();
        if self.last_reported_status.as_ref() != Some(&status) {
            self.last_reported_status = Some(status.clone());

            if self.config.server_status_notification() {
                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
            } else if let (
                health @ (lsp_ext::Health::Warning | lsp_ext::Health::Error),
                Some(message),
            ) = (status.health, &status.message)
            {
                // Offer an "open log" button only when there is something in
                // the log worth opening.
                let open_log_button = tracing::enabled!(tracing::Level::ERROR)
                    && (self.fetch_build_data_error().is_err()
                        || self.fetch_workspace_error().is_err());
                self.show_message(
                    // The `Ok` arm is unreachable here (the binding above only
                    // matches Warning/Error), but the match must stay exhaustive.
                    match health {
                        lsp_ext::Health::Ok => lsp_types::MessageType::INFO,
                        lsp_ext::Health::Warning => lsp_types::MessageType::WARNING,
                        lsp_ext::Health::Error => lsp_types::MessageType::ERROR,
                    },
                    message.clone(),
                    open_log_button,
                );
            }
        }
    }
451
handle_task(&mut self, prime_caches_progress: &mut Vec<PrimeCachesProgress>, task: Task)452 fn handle_task(&mut self, prime_caches_progress: &mut Vec<PrimeCachesProgress>, task: Task) {
453 match task {
454 Task::Response(response) => self.respond(response),
455 // Only retry requests that haven't been cancelled. Otherwise we do unnecessary work.
456 Task::Retry(req) if !self.is_completed(&req) => self.on_request(req),
457 Task::Retry(_) => (),
458 Task::Diagnostics(diagnostics_per_file) => {
459 for (file_id, diagnostics) in diagnostics_per_file {
460 self.diagnostics.set_native_diagnostics(file_id, diagnostics)
461 }
462 }
463 Task::PrimeCaches(progress) => match progress {
464 PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
465 PrimeCachesProgress::Report(_) => {
466 match prime_caches_progress.last_mut() {
467 Some(last @ PrimeCachesProgress::Report(_)) => {
468 // Coalesce subsequent update events.
469 *last = progress;
470 }
471 _ => prime_caches_progress.push(progress),
472 }
473 }
474 PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
475 },
476 Task::FetchWorkspace(progress) => {
477 let (state, msg) = match progress {
478 ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
479 ProjectWorkspaceProgress::Report(msg) => (Progress::Report, Some(msg)),
480 ProjectWorkspaceProgress::End(workspaces, force_reload_crate_graph) => {
481 self.fetch_workspaces_queue
482 .op_completed(Some((workspaces, force_reload_crate_graph)));
483 if let Err(e) = self.fetch_workspace_error() {
484 tracing::error!("FetchWorkspaceError:\n{e}");
485 }
486
487 let old = Arc::clone(&self.workspaces);
488 self.switch_workspaces("fetched workspace".to_string());
489 let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
490
491 if self.config.run_build_scripts() && workspaces_updated {
492 self.fetch_build_data_queue
493 .request_op(format!("workspace updated"), ());
494 }
495
496 (Progress::End, None)
497 }
498 };
499
500 self.report_progress("Fetching", state, msg, None, None);
501 }
502 Task::FetchBuildData(progress) => {
503 let (state, msg) = match progress {
504 BuildDataProgress::Begin => (Some(Progress::Begin), None),
505 BuildDataProgress::Report(msg) => (Some(Progress::Report), Some(msg)),
506 BuildDataProgress::End(build_data_result) => {
507 self.fetch_build_data_queue.op_completed(build_data_result);
508 if let Err(e) = self.fetch_build_data_error() {
509 tracing::error!("FetchBuildDataError:\n{e}");
510 }
511
512 self.switch_workspaces("fetched build data".to_string());
513
514 (Some(Progress::End), None)
515 }
516 };
517
518 if let Some(state) = state {
519 self.report_progress("Building", state, msg, None, None);
520 }
521 }
522 Task::LoadProcMacros(progress) => {
523 let (state, msg) = match progress {
524 ProcMacroProgress::Begin => (Some(Progress::Begin), None),
525 ProcMacroProgress::Report(msg) => (Some(Progress::Report), Some(msg)),
526 ProcMacroProgress::End(proc_macro_load_result) => {
527 self.fetch_proc_macros_queue.op_completed(true);
528 self.set_proc_macros(proc_macro_load_result);
529
530 (Some(Progress::End), None)
531 }
532 };
533
534 if let Some(state) = state {
535 self.report_progress("Loading", state, msg, None, None);
536 }
537 }
538 }
539 }
540
    /// Applies a message from the VFS loader: either freshly loaded file
    /// contents, or a loading-progress update.
    fn handle_vfs_msg(&mut self, message: vfs::loader::Message) {
        match message {
            vfs::loader::Message::Loaded { files } => {
                let vfs = &mut self.vfs.write().0;
                for (path, contents) in files {
                    let path = VfsPath::from(path);
                    // Documents open in the editor are authoritative; don't
                    // clobber them with on-disk contents.
                    if !self.mem_docs.contains(&path) {
                        vfs.set_file_contents(path, contents);
                    }
                }
            }
            vfs::loader::Message::Progress { n_total, n_done, config_version } => {
                // Progress must never come from a config newer than the one we set.
                always!(config_version <= self.vfs_config_version);

                self.vfs_progress_config_version = config_version;
                self.vfs_progress_n_total = n_total;
                self.vfs_progress_n_done = n_done;

                let state = if n_done == 0 {
                    Progress::Begin
                } else if n_done < n_total {
                    Progress::Report
                } else {
                    assert_eq!(n_done, n_total);
                    Progress::End
                };
                self.report_progress(
                    "Roots Scanned",
                    state,
                    Some(format!("{n_done}/{n_total}")),
                    Some(Progress::fraction(n_done, n_total)),
                    None,
                );
            }
        }
    }
577
handle_flycheck_msg(&mut self, message: flycheck::Message)578 fn handle_flycheck_msg(&mut self, message: flycheck::Message) {
579 match message {
580 flycheck::Message::AddDiagnostic { id, workspace_root, diagnostic } => {
581 let snap = self.snapshot();
582 let diagnostics = crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
583 &self.config.diagnostics_map(),
584 &diagnostic,
585 &workspace_root,
586 &snap,
587 );
588 for diag in diagnostics {
589 match url_to_file_id(&self.vfs.read().0, &diag.url) {
590 Ok(file_id) => self.diagnostics.add_check_diagnostic(
591 id,
592 file_id,
593 diag.diagnostic,
594 diag.fix,
595 ),
596 Err(err) => {
597 tracing::error!(
598 "flycheck {id}: File with cargo diagnostic not found in VFS: {}",
599 err
600 );
601 }
602 };
603 }
604 }
605
606 flycheck::Message::Progress { id, progress } => {
607 let (state, message) = match progress {
608 flycheck::Progress::DidStart => {
609 self.diagnostics.clear_check(id);
610 (Progress::Begin, None)
611 }
612 flycheck::Progress::DidCheckCrate(target) => (Progress::Report, Some(target)),
613 flycheck::Progress::DidCancel => {
614 self.last_flycheck_error = None;
615 (Progress::End, None)
616 }
617 flycheck::Progress::DidFailToRestart(err) => {
618 self.last_flycheck_error =
619 Some(format!("cargo check failed to start: {err}"));
620 return;
621 }
622 flycheck::Progress::DidFinish(result) => {
623 self.last_flycheck_error =
624 result.err().map(|err| format!("cargo check failed to start: {err}"));
625 (Progress::End, None)
626 }
627 };
628
629 // When we're running multiple flychecks, we have to include a disambiguator in
630 // the title, or the editor complains. Note that this is a user-facing string.
631 let title = if self.flycheck.len() == 1 {
632 format!("{}", self.config.flycheck())
633 } else {
634 format!("cargo check (#{})", id + 1)
635 };
636 self.report_progress(
637 &title,
638 state,
639 message,
640 None,
641 Some(format!("rust-analyzer/flycheck/{id}")),
642 );
643 }
644 }
645 }
646
    /// Registers and handles a request. This should only be called once per incoming request.
    ///
    /// `request_received` is the instant the request was pulled off the wire;
    /// it is stored alongside the request via `register_request`.
    fn on_new_request(&mut self, request_received: Instant, req: Request) {
        self.register_request(&req, request_received);
        self.on_request(req);
    }
652
    /// Handles a request.
    ///
    /// The dispatcher chain decides where each request type runs: on the main
    /// thread (`on_sync_mut` for handlers that mutate `GlobalState`, `on_sync`
    /// for latency-critical typing helpers), on the dedicated formatting
    /// thread (`on_fmt_thread`), on high-priority pool threads
    /// (`on_latency_sensitive`), or on regular pool threads (`on`). Once a
    /// shutdown has been requested, every further request is rejected.
    fn on_request(&mut self, req: Request) {
        let mut dispatcher = RequestDispatcher { req: Some(req), global_state: self };
        dispatcher.on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
            s.shutdown_requested = true;
            Ok(())
        });

        // If the request was not `shutdown` (i.e. still present in the
        // dispatcher) and shutdown was already requested, reject it.
        match &mut dispatcher {
            RequestDispatcher { req: Some(req), global_state: this } if this.shutdown_requested => {
                this.respond(lsp_server::Response::new_err(
                    req.id.clone(),
                    lsp_server::ErrorCode::InvalidRequest as i32,
                    "Shutdown already requested.".to_owned(),
                ));
                return;
            }
            _ => (),
        }

        use crate::handlers::request as handlers;

        dispatcher
            // Request handlers that must run on the main thread
            // because they mutate GlobalState:
            .on_sync_mut::<lsp_ext::ReloadWorkspace>(handlers::handle_workspace_reload)
            .on_sync_mut::<lsp_ext::RebuildProcMacros>(handlers::handle_proc_macros_rebuild)
            .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)
            .on_sync_mut::<lsp_ext::ShuffleCrateGraph>(handlers::handle_shuffle_crate_graph)
            // Request handlers which are related to the user typing
            // are run on the main thread to reduce latency:
            .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)
            .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)
            .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)
            .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)
            .on_sync::<lsp_ext::OnTypeFormatting>(handlers::handle_on_type_formatting)
            // Formatting should be done immediately as the editor might wait on it, but we can't
            // put it on the main thread as we do not want the main thread to block on rustfmt.
            // So we have an extra thread just for formatting requests to make sure it gets handled
            // as fast as possible.
            .on_fmt_thread::<lsp_types::request::Formatting>(handlers::handle_formatting)
            .on_fmt_thread::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
            // We can’t run latency-sensitive request handlers which do semantic
            // analysis on the main thread because that would block other
            // requests. Instead, we run these request handlers on higher priority
            // threads in the threadpool.
            .on_latency_sensitive::<lsp_types::request::Completion>(handlers::handle_completion)
            .on_latency_sensitive::<lsp_types::request::ResolveCompletionItem>(
                handlers::handle_completion_resolve,
            )
            .on_latency_sensitive::<lsp_types::request::SemanticTokensFullRequest>(
                handlers::handle_semantic_tokens_full,
            )
            .on_latency_sensitive::<lsp_types::request::SemanticTokensFullDeltaRequest>(
                handlers::handle_semantic_tokens_full_delta,
            )
            .on_latency_sensitive::<lsp_types::request::SemanticTokensRangeRequest>(
                handlers::handle_semantic_tokens_range,
            )
            // All other request handlers
            .on::<lsp_ext::FetchDependencyList>(handlers::fetch_dependency_list)
            .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
            .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
            .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
            .on::<lsp_ext::ViewMir>(handlers::handle_view_mir)
            .on::<lsp_ext::InterpretFunction>(handlers::handle_interpret_function)
            .on::<lsp_ext::ViewFileText>(handlers::handle_view_file_text)
            .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
            .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
            .on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
            .on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
            .on::<lsp_ext::Runnables>(handlers::handle_runnables)
            .on::<lsp_ext::RelatedTests>(handlers::handle_related_tests)
            .on::<lsp_ext::CodeActionRequest>(handlers::handle_code_action)
            .on::<lsp_ext::CodeActionResolveRequest>(handlers::handle_code_action_resolve)
            .on::<lsp_ext::HoverRequest>(handlers::handle_hover)
            .on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
            .on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
            .on::<lsp_ext::MoveItem>(handlers::handle_move_item)
            .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
            .on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
            .on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
            .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
            .on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
            .on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
            .on_no_retry::<lsp_types::request::InlayHintRequest>(handlers::handle_inlay_hints)
            .on::<lsp_types::request::InlayHintResolveRequest>(handlers::handle_inlay_hints_resolve)
            .on::<lsp_types::request::CodeLensRequest>(handlers::handle_code_lens)
            .on::<lsp_types::request::CodeLensResolve>(handlers::handle_code_lens_resolve)
            .on::<lsp_types::request::FoldingRangeRequest>(handlers::handle_folding_range)
            .on::<lsp_types::request::SignatureHelpRequest>(handlers::handle_signature_help)
            .on::<lsp_types::request::PrepareRenameRequest>(handlers::handle_prepare_rename)
            .on::<lsp_types::request::Rename>(handlers::handle_rename)
            .on::<lsp_types::request::References>(handlers::handle_references)
            .on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
            .on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
            .on::<lsp_types::request::CallHierarchyIncomingCalls>(
                handlers::handle_call_hierarchy_incoming,
            )
            .on::<lsp_types::request::CallHierarchyOutgoingCalls>(
                handlers::handle_call_hierarchy_outgoing,
            )
            .on::<lsp_types::request::WillRenameFiles>(handlers::handle_will_rename_files)
            .on::<lsp_ext::Ssr>(handlers::handle_ssr)
            .finish();
    }
759
    /// Handles an incoming notification.
    ///
    /// All notification handlers run synchronously on the main thread
    /// (`on_sync_mut`), since each of them mutates `GlobalState`.
    fn on_notification(&mut self, not: Notification) -> Result<()> {
        use crate::handlers::notification as handlers;
        use lsp_types::notification as notifs;

        NotificationDispatcher { not: Some(not), global_state: self }
            .on_sync_mut::<notifs::Cancel>(handlers::handle_cancel)?
            .on_sync_mut::<notifs::WorkDoneProgressCancel>(
                handlers::handle_work_done_progress_cancel,
            )?
            .on_sync_mut::<notifs::DidOpenTextDocument>(handlers::handle_did_open_text_document)?
            .on_sync_mut::<notifs::DidChangeTextDocument>(
                handlers::handle_did_change_text_document,
            )?
            .on_sync_mut::<notifs::DidCloseTextDocument>(handlers::handle_did_close_text_document)?
            .on_sync_mut::<notifs::DidSaveTextDocument>(handlers::handle_did_save_text_document)?
            .on_sync_mut::<notifs::DidChangeConfiguration>(
                handlers::handle_did_change_configuration,
            )?
            .on_sync_mut::<notifs::DidChangeWorkspaceFolders>(
                handlers::handle_did_change_workspace_folders,
            )?
            .on_sync_mut::<notifs::DidChangeWatchedFiles>(
                handlers::handle_did_change_watched_files,
            )?
            .on_sync_mut::<lsp_ext::CancelFlycheck>(handlers::handle_cancel_flycheck)?
            .on_sync_mut::<lsp_ext::ClearFlycheck>(handlers::handle_clear_flycheck)?
            .on_sync_mut::<lsp_ext::RunFlycheck>(handlers::handle_run_flycheck)?
            .finish();
        Ok(())
    }
791
update_diagnostics(&mut self)792 fn update_diagnostics(&mut self) {
793 let db = self.analysis_host.raw_database();
794 let subscriptions = self
795 .mem_docs
796 .iter()
797 .map(|path| self.vfs.read().0.file_id(path).unwrap())
798 .filter(|&file_id| {
799 let source_root = db.file_source_root(file_id);
800 // Only publish diagnostics for files in the workspace, not from crates.io deps
801 // or the sysroot.
802 // While theoretically these should never have errors, we have quite a few false
803 // positives particularly in the stdlib, and those diagnostics would stay around
804 // forever if we emitted them here.
805 !db.source_root(source_root).is_library
806 })
807 .collect::<Vec<_>>();
808
809 tracing::trace!("updating notifications for {:?}", subscriptions);
810
811 let snapshot = self.snapshot();
812
813 // Diagnostics are triggered by the user typing
814 // so we run them on a latency sensitive thread.
815 self.task_pool.handle.spawn(ThreadIntent::LatencySensitive, move || {
816 let _p = profile::span("publish_diagnostics");
817 let _ctx = stdx::panic_context::enter("publish_diagnostics".to_owned());
818 let diagnostics = subscriptions
819 .into_iter()
820 .filter_map(|file_id| {
821 let line_index = snapshot.file_line_index(file_id).ok()?;
822 Some((
823 file_id,
824 line_index,
825 snapshot
826 .analysis
827 .diagnostics(
828 &snapshot.config.diagnostics(),
829 ide::AssistResolveStrategy::None,
830 file_id,
831 )
832 .ok()?,
833 ))
834 })
835 .map(|(file_id, line_index, it)| {
836 (
837 file_id,
838 it.into_iter()
839 .map(move |d| lsp_types::Diagnostic {
840 range: crate::to_proto::range(&line_index, d.range),
841 severity: Some(crate::to_proto::diagnostic_severity(d.severity)),
842 code: Some(lsp_types::NumberOrString::String(
843 d.code.as_str().to_string(),
844 )),
845 code_description: Some(lsp_types::CodeDescription {
846 href: lsp_types::Url::parse(&format!(
847 "https://rust-analyzer.github.io/manual.html#{}",
848 d.code.as_str()
849 ))
850 .unwrap(),
851 }),
852 source: Some("rust-analyzer".to_string()),
853 message: d.message,
854 related_information: None,
855 tags: if d.unused {
856 Some(vec![lsp_types::DiagnosticTag::UNNECESSARY])
857 } else {
858 None
859 },
860 data: None,
861 })
862 .collect::<Vec<_>>(),
863 )
864 });
865 Task::Diagnostics(diagnostics.collect())
866 });
867 }
868 }
869