diff --git a/.vscode/launch.json b/.vscode/launch.json index 7d5f4ff932913..eacdfa4a3fc89 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -3,17 +3,18 @@ "version": "0.2.0", "configurations": [ { + "name": "Launch Client", "type": "extensionHost", "request": "launch", - "name": "Launch Client", "runtimeExecutable": "${execPath}", "args": ["--extensionDevelopmentPath=${workspaceFolder}/editors/vscode"], "sourceMaps": true, - "outFiles": ["${workspaceFolder}/editors/vscode/dist/*.js"], + "outFiles": ["${workspaceFolder}/editors/vscode/out/**/*.js"], "env": { "SERVER_PATH_DEV": "${workspaceRoot}/editors/vscode/target/debug/oxc_language_server", "RUST_LOG": "debug" - } + }, + "preLaunchTask": "build: extension+rust" }, { "type": "lldb", @@ -31,6 +32,61 @@ } // "args": ["--ARGS-TO-OXLINT"], // "cwd": "PATH-TO-TEST-PROJECT" + }, + { + "name": "Language Server (Socket)", + "type": "lldb", + "request": "launch", + "cargo": { + "args": ["build", "--package", "oxc_language_server"], + "filter": { + "name": "oxc_language_server", + "kind": "bin" + } + }, + "env": { + "OXC_LS_LISTEN": "unix:/tmp/oxc_ls.sock", + "RUST_LOG": "debug" + }, + "sourceLanguages": ["rust"], + "expressions": "native", + "stopOnEntry": false, + "presentation": { + "hidden": true, + "group": "", + "order": 1 + } + }, + { + "name": "Launch Client (External LS)", + "type": "extensionHost", + "request": "launch", + "runtimeExecutable": "${execPath}", + "args": ["--extensionDevelopmentPath=${workspaceFolder}/editors/vscode"], + "sourceMaps": true, + "outFiles": ["${workspaceFolder}/editors/vscode/out/**/*.js"], + "env": { + "OXC_LS_CONNECT": "unix:/tmp/oxc_ls.sock", + "RUST_LOG": "debug" + }, + "presentation": { + "hidden": true, + "group": "", + "order": 1, + } + } + ], + "compounds": [ + { + "name": "Debug VS Code Extension and Oxc Language Server", + "configurations": ["Language Server (Socket)", "Launch Client (External LS)"], + "stopAll": true, + "preLaunchTask": "build: extension+rust", + "presentation": { + "hidden": false, + "group": "", + "order": 1 + } } ] } diff --git a/.vscode/lldb_rust_init.py b/.vscode/lldb_rust_init.py new file mode 100644 index 0000000000000..a51dd0651a7e3 --- /dev/null +++ b/.vscode/lldb_rust_init.py @@ -0,0 +1,120 @@ +"""LLDB Rust pretty-printer bootstrap for CodeLLDB. + +Loads official Rust formatters (lldb_lookup + lldb_commands) without hardcoded toolchain paths. +Adds summaries for core collection types and adjusts string length. Safe to re-run. +""" +from __future__ import annotations +import os, sys, shutil, subprocess + +def log(*parts: object) -> None: + print("[lldb_rust_init]", *parts) + +# 1) Ensure ~/.cargo/bin on PATH so rustc is discoverable when launched via CodeLLDB. +home = os.environ.get("HOME", "") +if home: + cargo_bin = os.path.join(home, ".cargo", "bin") + path = os.environ.get("PATH", "") + parts = path.split(os.pathsep) if path else [] + if cargo_bin not in parts: + # Prepend cargo_bin preserving existing PATH using the correct platform-specific separator. + os.environ["PATH"] = os.pathsep.join([cargo_bin] + parts) + log("PATH prepended with", cargo_bin) +else: + log("HOME unset; skipping PATH prepend") + +# 2) Locate rustc & sysroot. 
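+# `rustc --print sysroot` prints the root of the active toolchain; the official Rust
+# formatters (lldb_lookup.py and the lldb_commands script) live under <sysroot>/lib/rustlib/etc,
+# which is why no toolchain path needs to be hardcoded below.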
+rustc = shutil.which("rustc") +if not rustc: + log("rustc NOT FOUND; aborting formatter init") + raise SystemExit +log("rustc ->", rustc) +try: + sysroot = subprocess.check_output([rustc, "--print", "sysroot"], text=True).strip() +except Exception as e: # noqa: BLE001 + log("Failed to get sysroot:", e) + raise SystemExit +log("sysroot ->", sysroot) + +etc_dir = os.path.join(sysroot, "lib", "rustlib", "etc") +if not os.path.isdir(etc_dir): + log("Missing etc dir:", etc_dir) + raise SystemExit +log("Loading Rust formatters from", etc_dir) + +# 3) Import lldb_lookup & source lldb_commands via LLDB command API. +if etc_dir not in sys.path: + sys.path.append(etc_dir) +try: + import lldb_lookup # type: ignore + log("Imported lldb_lookup OK") +except Exception as e: # noqa: BLE001 + log("Import lldb_lookup FAILED:", e) + raise SystemExit + +# Acquire lldb debugger object from injected global 'lldb' (provided by CodeLLDB environment). +try: + import lldb # type: ignore +except Exception as e: # noqa: BLE001 + log("Unable to import lldb module (unexpected):", e) + raise SystemExit + +dbg = lldb.debugger + +# Source the static commands file for additional summaries (matches rust-lldb behavior). +commands_path = os.path.join(etc_dir, "lldb_commands") +if os.path.isfile(commands_path): + dbg.HandleCommand(f"command source -s 0 {commands_path}") + log("Sourced", commands_path) +else: + log("Commands file not found:", commands_path) + +# Enable Rust category & increase max string summary length. +dbg.HandleCommand("type category enable Rust") +dbg.HandleCommand("settings set target.max-string-summary-length 2000") + +# Register Vec printers explicitly (defensive if commands file changes in future). +dbg.HandleCommand( + 'type synthetic add -l lldb_lookup.synthetic_lookup -x "^(alloc::([a-z_]+::)+)Vec<.+>$" --category Rust' +) +dbg.HandleCommand( + 'type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)Vec<.+>$" --category Rust' +) + +# Provide a concise summary for URI types (lsp_types::Url / fluent_uri::Uri wrappers). +# These commonly contain an inner String we want to display directly. +# We attempt a regex that matches types ending with `::Uri` or `::Url` and which have a +# single field referencing alloc::string::String. +def uri_summary(val_obj): # noqa: D401 + """LLDB summary callback for various Uri/Url wrapper types. + + Tries to locate an inner alloc::string::String and return its contents. + Falls back to existing lldb_lookup.summary_lookup if structure differs. + """ + try: + # Heuristics: search immediate children then recurse one level. + for child in val_obj.children: + ty = child.type.name or "" + if "alloc::string::String" in ty: + # Use the default Rust String summary by delegating to lldb_lookup. + import lldb_lookup # type: ignore + return lldb_lookup.summary_lookup(child) + # Recurse one level for wrappers like tuple struct or newtype. 
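+        # Only one extra level is searched; if nothing matches, the function falls back to an
+        # empty summary below rather than recursing further.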
+ for child in val_obj.children: + for gchild in child.children: + ty = gchild.type.name or "" + if "alloc::string::String" in ty: + import lldb_lookup # type: ignore + return lldb_lookup.summary_lookup(gchild) + except Exception as e: # noqa: BLE001 + return f"" + return "" + +try: + dbg.HandleCommand( + 'type summary add -e -x -F lldb_rust_init.uri_summary "^(.*(::)+)(Url|Uri)$" --category Rust' + ) + log("Registered custom Url/Uri summary") +except Exception as e: + log("Failed to register custom Url/Uri summary:", e) + +log("Rust formatter initialization complete") diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000000..1f33deea4dcf5 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,20 @@ +{ + // Fix for CodeLLDB v1.11.0+ not showing Rust Vec contents in debugger + // + // Problem: CodeLLDB v1.11.0+ stopped bundling Rust pretty printers and expects + // them to load automatically from the Rust toolchain, but this often fails. + // Symptoms: Vec shows only "buf" and "len" instead of expandable elements + // + // Solution: Manually load the Rust LLDB formatters using preRunCommands + // - Uses dynamic path resolution via `rustc --print sysroot` for cross-platform compatibility + // - Works on Linux, Windows, and macOS without hardcoded paths + // + // Reference: https://github.com/vadimcn/codelldb/issues/1166 + "lldb.launch.initCommands": [ + // Import Rust LLDB formatters and custom URI summaries + "script import sys; sys.path.append('.vscode'); import lldb_rust_init" + ], + "lldb.showDisassembly": "auto", + "lldb.dereferencePointers": true, + "lldb.consoleMode": "commands", +} diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 08a64eb76f4fe..9d0fcf3078784 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -58,6 +58,23 @@ "$tsc" ] }, + { + "label": "extension: compile", + "type": "npm", + "script": "compile", + "options": { + "cwd": "${workspaceFolder}/editors/vscode" + }, + "group": "build", + "presentation": { + "panel": "dedicated", + "reveal": "silent", + "clear": true + }, + "problemMatcher": [ + "$tsc" + ] + }, { "type": "shell", "command": "cd ./editors/vscode && npm run watch", @@ -71,6 +88,17 @@ "panel": "dedicated", "reveal": "never" } + }, + { + "label": "build: extension+rust", + "dependsOn": [ + "extension: compile", + "rust: cargo test --no-run" + ], + "problemMatcher": [], + "group": { + "kind": "build" + } } ] } diff --git a/Cargo.lock b/Cargo.lock index 314f9f7599930..648a1c83197f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1442,6 +1442,17 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "mio" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + [[package]] name = "napi" version = "3.4.0" @@ -3508,6 +3519,16 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -3685,8 +3706,13 @@ version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ + "bytes", + "libc", + "mio", "pin-project-lite", + "socket2", "tokio-macros", + "windows-sys 0.61.2", ] [[package]] @@ -4211,7 +4237,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -4220,7 +4246,16 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", ] [[package]] @@ -4238,14 +4273,31 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -4263,48 +4315,96 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + [[package]] name = "wit-bindgen" version = "0.46.0" diff --git a/Cargo.toml b/Cargo.toml index 4ba597d71fea3..7f2a35c0799f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -252,6 +252,14 @@ oxc_ast_macros.opt-level = 1 insta.opt-level = 3 similar.opt-level = 3 +# Re-enable debuginfo for the language server specifically so Rust breakpoints bind when +# debugging via the "Language Server (Socket)" launch configuration. The top-level +# `[profile.dev] debug = false` disables DWARF info globally which caused LLDB to only +# show assembly and ignore source breakpoints in this binary. +[profile.dev.package.oxc_language_server] +debug = true +opt-level = 0 + [profile.release.package.oxc_playground_napi] opt-level = 'z' diff --git a/README.md b/README.md index 774e4ab5fb1eb..8e0328ebda168 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,27 @@ For more information, check out our website at [oxc.rs](https://oxc.rs). * Oxidation is the chemical process that creates rust +## Workspace-wide Linting (Experimental) + +Oxlint now supports an optional workspace-wide linting mode in the VS Code extension. + +When enabled, all lintable files in the workspace are analyzed eagerly (not only the ones you open). This provides: + +- Immediate visibility of problems across the entire codebase in the Problems panel +- Automatic incremental re-linting of changed files via LSP file watchers +- Respect for `.gitignore` and oxlint ignore patterns +- High performance through internal parallelization and batching + +Enable it via the VS Code setting: `oxc.lint.workspaceMode`. 
+ +Notes: +- This feature is currently experimental and disabled by default. +- Designed to scale to large monorepos (thousands of files) while remaining fast. +- Only re-lints changed files after the initial scan to minimize overhead. +- Supports common JS/TS framework file extensions (js, mjs, cjs, jsx, ts, mts, cts, tsx, vue, svelte, astro). + +Planned improvements include progress reporting during the initial scan and additional configurability (custom include/exclude globs). + ## 🏗️ Design Principles - **Performance**: Through rigorous performance engineering. diff --git a/crates/oxc_language_server/Cargo.toml b/crates/oxc_language_server/Cargo.toml index af6ee3aa18f69..ff86d53ef86a6 100644 --- a/crates/oxc_language_server/Cargo.toml +++ b/crates/oxc_language_server/Cargo.toml @@ -38,7 +38,7 @@ papaya = { workspace = true } rustc-hash = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread", "io-std", "macros"] } +tokio = { workspace = true, features = ["rt-multi-thread", "io-std", "io-util", "macros", "net"] } tower-lsp-server = { workspace = true, features = ["proposed"] } [dev-dependencies] diff --git a/crates/oxc_language_server/src/backend.rs b/crates/oxc_language_server/src/backend.rs index f4be0ab0883ee..6ca3ed4a6f100 100644 --- a/crates/oxc_language_server/src/backend.rs +++ b/crates/oxc_language_server/src/backend.rs @@ -24,6 +24,7 @@ use crate::{ commands::{FIX_ALL_COMMAND_ID, FixAllCommandArgs}, file_system::LSPFileSystem, linter::server_linter::ServerLinterRun, + log_bridge::set_global_client, options::{Options, WorkspaceOption}, worker::WorkspaceWorker, }; @@ -207,6 +208,29 @@ impl LanguageServer for Backend { } } + // Initialize workspace mode if enabled + let mut workspace_diagnostics = Vec::new(); + if capabilities.dynamic_watchers { + for worker in workers { + let options = worker.get_options().await; + if options.as_ref().is_some_and(|o| o.lint.workspace_mode) { + // Register source file watchers + if let Some(registration) = worker.init_source_file_watchers().await { + registrations.push(registration); + } + + // Perform initial workspace scan + let diagnostics = worker.lint_workspace().await; + workspace_diagnostics.extend(diagnostics); + } + } + } + + // Publish all workspace diagnostics + if !workspace_diagnostics.is_empty() { + self.publish_all_diagnostics(&workspace_diagnostics).await; + } + if capabilities.dynamic_formatting { // check if one workspace has formatting enabled let mut started_worker = false; @@ -361,7 +385,8 @@ impl LanguageServer for Backend { } } - /// This notification is sent when a configuration file of a tool changes (example: `.oxlintrc.json`). + /// This notification is sent when a configuration file of a tool changes (example: `.oxlintrc.json`) + /// or when source files change in workspace mode. /// The server will re-lint the affected files and send updated diagnostics. 
/// /// See: @@ -381,11 +406,28 @@ impl LanguageServer for Backend { else { continue; }; - let Some(diagnostics) = worker.did_change_watched_files(file_event).await else { - continue; - }; - all_diagnostics.extend(diagnostics); + // Check if this is a config file change or source file change + if is_source_file(&file_event.uri) { + // Source file change - lint the file if workspace mode is enabled + let options = worker.get_options().await; + if options.as_ref().is_some_and(|o| o.lint.workspace_mode) { + if let Some(diagnostics) = + worker.lint_file(&file_event.uri, None, ServerLinterRun::Always).await + { + all_diagnostics.push(( + file_event.uri.to_string(), + diagnostics.into_iter().map(|d| d.diagnostic).collect(), + )); + } + } + } else { + // Config file change - use existing behavior + let Some(diagnostics) = worker.did_change_watched_files(file_event).await else { + continue; + }; + all_diagnostics.extend(diagnostics); + } } if !all_diagnostics.is_empty() { @@ -658,6 +700,8 @@ impl Backend { /// It also holds the capabilities of the language server and an in-memory file system. /// The client is used to communicate with the LSP client. pub fn new(client: Client) -> Self { + // Register the client for global LSP log bridging. + set_global_client(client.clone()); Self { client, workspace_workers: Arc::new(RwLock::new(vec![])), @@ -716,3 +760,18 @@ impl Backend { .await; } } + +/// Check if a URI points to a source file that should be linted +use tower_lsp_server::UriExt; // bring trait for to_file_path into scope + +fn is_source_file(uri: &Uri) -> bool { + const SOURCE_EXTENSIONS: &[&str] = + &["js", "jsx", "mjs", "cjs", "ts", "tsx", "mts", "cts", "vue", "svelte", "astro"]; + let Some(path) = uri.to_file_path() else { + return false; + }; + let Some(ext) = path.extension().and_then(|e| e.to_str()) else { + return false; + }; + SOURCE_EXTENSIONS.contains(&ext) +} diff --git a/crates/oxc_language_server/src/linter/isolated_lint_handler.rs b/crates/oxc_language_server/src/linter/isolated_lint_handler.rs index fb0e76ed6931b..afdc092708423 100644 --- a/crates/oxc_language_server/src/linter/isolated_lint_handler.rs +++ b/crates/oxc_language_server/src/linter/isolated_lint_handler.rs @@ -3,16 +3,18 @@ use std::{ sync::{Arc, OnceLock}, }; -use log::debug; +use log::{debug, info}; use oxc_data_structures::rope::Rope; +use oxc_linter::read_to_string; use rustc_hash::FxHashSet; +use std::collections::HashMap; use tower_lsp_server::{UriExt, lsp_types::Uri}; use oxc_allocator::Allocator; use oxc_linter::{ AllowWarnDeny, ConfigStore, DirectivesStore, DisableDirectives, Fix, LINTABLE_EXTENSIONS, LintOptions, LintService, LintServiceOptions, Linter, Message, PossibleFixes, RuleCommentType, - RuntimeFileSystem, read_to_arena_str, read_to_string, + RuntimeFileSystem, read_to_arena_str, }; use super::error_with_position::{ @@ -139,6 +141,90 @@ impl IsolatedLintHandler { messages } + /// Batch lint multiple paths using the underlying parallel runtime. + /// Returns a vector of (Uri, DiagnosticReport list). Ignores non-lintable paths silently. + pub fn run_workspace(&mut self, paths: &[PathBuf]) -> Vec<(Uri, Vec)> { + // Filter to lintable extensions first. 
+ let lintable: Vec = + paths.iter().filter(|p| Self::should_lint_path(p)).cloned().collect(); + if lintable.is_empty() { + return Vec::new(); + } + info!( + "[isolated] workspace batch start paths_total={} lintable={}", + paths.len(), + lintable.len() + ); + let t_batch = Some(std::time::Instant::now()); + let arc_paths: Vec> = + lintable.iter().map(|p| Arc::from(p.as_os_str())).collect(); + + // Run parallel lint across all entry paths. + let messages = self.service.with_paths(arc_paths).run_source(); + + // Group messages by originating file path. + let mut grouped: HashMap, Vec> = HashMap::new(); + for msg in messages { + grouped.entry(msg.file_path.clone()).or_default().push(msg); + } + + let mut out: Vec<(Uri, Vec)> = Vec::with_capacity(grouped.len()); + + for (file_os, msgs) in grouped.into_iter() { + let path_buf = PathBuf::from(file_os.as_ref()); + // Read source text (skip if unreadable). + let Ok(source_text) = read_to_string(&path_buf) else { + continue; + }; + let rope = Rope::from_str(&source_text); + let Some(uri) = Uri::from_file_path(&path_buf) else { + continue; + }; + + let mut reports: Vec = msgs + .iter() + .map(|m| message_to_lsp_diagnostic(m, &uri, &source_text, &rope)) + .collect(); + + // Append unused directives diagnostics if configured + if let Some(severity) = self.unused_directives_severity + && let Some(directives) = self.directives_coordinator.get(&path_buf) + { + let unused = create_unused_directives_messages(&directives, severity, &source_text); + reports.extend( + unused.iter().map(|m| message_to_lsp_diagnostic(m, &uri, &source_text, &rope)), + ); + } + + // Inverted related span diagnostics + let inverted = generate_inverted_diagnostics(&reports, &uri); + reports.extend(inverted); + + let count = reports.len(); + out.push((uri.clone(), reports)); + info!("[isolated] workspace file uri={} diagnostics={}", uri.as_str(), count); + } + + // Stable ordering for deterministic publish (by URI string) + out.sort_unstable_by(|a, b| a.0.as_str().cmp(b.0.as_str())); + if let Some(t_batch) = t_batch { + debug!( + "[profile] workspace isolated batch lintable={} output_files={} total_diagnostics={} ms={}", + lintable.len(), + out.len(), + out.iter().map(|(_, ds)| ds.len()).sum::(), + t_batch.elapsed().as_millis() + ); + } + info!( + "[isolated] workspace batch done lintable={} output_files={} total_diagnostics={}", + lintable.len(), + out.len(), + out.iter().map(|(_, ds)| ds.len()).sum::() + ); + out + } + fn should_lint_path(path: &Path) -> bool { static WANTED_EXTENSIONS: OnceLock> = OnceLock::new(); let wanted_exts = diff --git a/crates/oxc_language_server/src/linter/options.rs b/crates/oxc_language_server/src/linter/options.rs index 18c6eebd8cd30..94dd07e4a832a 100644 --- a/crates/oxc_language_server/src/linter/options.rs +++ b/crates/oxc_language_server/src/linter/options.rs @@ -31,6 +31,7 @@ pub struct LintOptions { pub type_aware: bool, pub disable_nested_config: bool, pub fix_kind: LintFixKindFlag, + pub workspace_mode: bool, } #[derive(Debug, Default, Serialize, PartialEq, Eq, Deserialize, Clone)] @@ -135,6 +136,9 @@ impl TryFrom for LintOptions { Some(&"all") => LintFixKindFlag::All, _ => LintFixKindFlag::default(), }), + workspace_mode: object + .get("workspaceMode") + .is_some_and(|key| serde_json::from_value::(key.clone()).unwrap_or_default()), }) } } diff --git a/crates/oxc_language_server/src/linter/server_linter.rs b/crates/oxc_language_server/src/linter/server_linter.rs index c2798e2b65c30..ea3e484a90a68 100644 --- 
a/crates/oxc_language_server/src/linter/server_linter.rs +++ b/crates/oxc_language_server/src/linter/server_linter.rs @@ -3,9 +3,10 @@ use std::str::FromStr; use std::sync::Arc; use ignore::gitignore::Gitignore; -use log::{debug, warn}; +use log::{debug, info, warn}; use oxc_linter::{AllowWarnDeny, FixKind, LintIgnoreMatcher}; use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet}; +use std::time::Instant; use tokio::sync::Mutex; use tower_lsp_server::lsp_types::{Diagnostic, Pattern, Uri}; @@ -34,13 +35,17 @@ pub enum ServerLinterRun { } pub struct ServerLinter { - isolated_linter: Arc>, - tsgo_linter: Arc>, + pub(crate) isolated_linter: Arc>, + pub(crate) tsgo_linter: Arc>, ignore_matcher: LintIgnoreMatcher, gitignore_glob: Vec, lint_on_run: Run, diagnostics: ServerLinterDiagnostics, extended_paths: FxHashSet, + // When true (workspace_mode enabled), type-aware (tsgo) linting is performed only in batch + // via WorkspaceWorker::lint_workspace. Per-file tsgo invocations are suppressed to avoid + // repeated process startup and improve performance on large projects. + workspace_mode: bool, } #[derive(Debug, Default)] @@ -86,6 +91,78 @@ impl ServerLinterDiagnostics { } impl ServerLinter { + /// Batch lint a set of URIs, merging isolated (oxlint) and type-aware (tsgo) diagnostics when applicable. + /// Caller must provide URIs already filtered for ignore patterns if desired; this method re-checks ignores. + pub async fn lint_workspace_batch(&self, uris: &[Uri]) -> Vec<(String, Vec)> { + // Early exit if nothing to process or no isolated linter. + if uris.is_empty() { + return Vec::new(); + } + + // Filter out ignored again for safety (cheap). + let filtered: Vec = uris.iter().filter(|u| !self.is_ignored(u)).cloned().collect(); + if filtered.is_empty() { + return Vec::new(); + } + + let type_aware_active = self.tsgo_linter.as_ref().is_some(); + use std::collections::HashMap; + let t_overall = Instant::now(); + + // Always run isolated batch first (includes JS + TS non-type-aware rules). + let isolated_paths: Vec = filtered + .iter() + .filter_map(|u| u.to_file_path().map(|p| p.into_owned())) + .collect(); + let t_isolated = Instant::now(); + let isolated_batch = self.run_workspace_isolated(&isolated_paths).await; + debug!( + "[profile] batch isolated files={} ms={}", + isolated_batch.len(), + t_isolated.elapsed().as_millis() + ); + + let mut merged: HashMap> = HashMap::with_capacity(isolated_batch.len()); + for (uri, reports) in isolated_batch.into_iter() { + self.cache_isolated_diagnostics(&uri, Some(reports.clone())); + let key = uri.to_string(); + merged.entry(key).or_default().extend(reports.into_iter().map(|r| r.diagnostic)); + } + + // Run tsgo batch only if active. 
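+        // tsgo results are merged into the same per-URI map as the isolated results, so each
+        // file ends up with a single combined diagnostics list when published.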
+ if type_aware_active { + if let Some(tsgo) = self.tsgo_linter.as_ref() { + let t_tsgo = Instant::now(); + let tsgo_batch = tsgo.lint_batch(&filtered); + info!("[tsgo] batch done produced={} ms={}", tsgo_batch.len(), t_tsgo.elapsed().as_millis()); + debug!( + "[profile] tsgo batch eligible={} produced={} ms={}", + filtered.len(), + tsgo_batch.len(), + t_tsgo.elapsed().as_millis() + ); + for (uri, reports) in tsgo_batch.into_iter() { + self.cache_tsgo_diagnostics(&uri, Some(reports.clone())); + let key = uri.to_string(); + merged.entry(key).or_default().extend(reports.into_iter().map(|r| r.diagnostic)); + } + } + } + + let mut out: Vec<(String, Vec)> = merged + .into_iter() + .map(|(uri, mut diags)| { + diags.sort_by(|a, b| { + (a.range.start.line, a.range.start.character, a.range.end.line, a.range.end.character) + .cmp(&(b.range.start.line, b.range.start.character, b.range.end.line, b.range.end.character)) + }); + (uri, diags) + }) + .collect(); + out.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + debug!("[profile] batch total files={} ms={}", out.len(), t_overall.elapsed().as_millis()); + out + } pub fn new(root_uri: &Uri, options: &LSPLintOptions) -> Self { let root_path = root_uri.to_file_path().unwrap(); let mut nested_ignore_patterns = Vec::new(); @@ -165,7 +242,7 @@ impl ServerLinter { }, ); - Self { + let instance = Self { isolated_linter: Arc::new(Mutex::new(isolated_linter)), ignore_matcher: LintIgnoreMatcher::new( &base_patterns, @@ -181,7 +258,16 @@ impl ServerLinter { } else { Arc::new(None) }, - } + workspace_mode: options.workspace_mode, + }; + info!( + "[linter] init root={} workspace_mode={} type_aware={} run_mode={:?}", + root_path.display(), + instance.workspace_mode, + options.type_aware, + options.run + ); + instance } /// Searches inside root_uri recursively for the default oxlint config files @@ -285,27 +371,26 @@ impl ServerLinter { } pub async fn revalidate_diagnostics(&self, uris: Vec) -> Vec<(String, Vec)> { + // Use batch path when workspace_mode is enabled (regardless of type-aware), else fallback to sequential. 
+ if self.workspace_mode { + return self.lint_workspace_batch(&uris).await; + } let mut diagnostics = Vec::with_capacity(uris.len()); for uri in uris { - if let Some(file_diagnostic) = - self.run_single(&uri, None, ServerLinterRun::Always).await - { - diagnostics.push(( - uri.to_string(), - file_diagnostic.into_iter().map(|d| d.diagnostic).collect(), - )); + if let Some(file_diagnostic) = self.run_single(&uri, None, ServerLinterRun::Always).await { + diagnostics.push((uri.to_string(), file_diagnostic.into_iter().map(|d| d.diagnostic).collect())); } } diagnostics } - fn is_ignored(&self, uri: &Uri) -> bool { + pub fn is_ignored(&self, uri: &Uri) -> bool { let Some(uri_path) = uri.to_file_path() else { return true; }; if self.ignore_matcher.should_ignore(&uri_path) { - debug!("ignored: {uri:?}"); + debug!("ignored: {uri_path:?}"); return true; } @@ -314,7 +399,7 @@ impl ServerLinter { continue; } if gitignore.matched_path_or_any_parents(&uri_path, uri_path.is_dir()).is_ignore() { - debug!("ignored: {uri:?}"); + debug!("ignored: {uri_path:?}"); return true; } } @@ -353,23 +438,77 @@ impl ServerLinter { } if oxlint { - let diagnostics = { - let mut isolated_linter = self.isolated_linter.lock().await; - isolated_linter.run_single(uri, content.clone()) - }; + let t0 = Some(Instant::now()); + let mut isolated_linter = self.isolated_linter.lock().await; + let diagnostics = isolated_linter.run_single(uri, content.clone()); + if let Some(t0) = t0 { + debug!( + "[profile] isolated run_single uri={:?} ms={}", + uri, + t0.elapsed().as_millis() + ); + } self.diagnostics.isolated_linter.pin().insert(uri.to_string(), diagnostics); } - if tsgolint && let Some(tsgo_linter) = self.tsgo_linter.as_ref() { - self.diagnostics - .tsgo_linter - .pin() - .insert(uri.to_string(), tsgo_linter.lint_file(uri, content.clone())); + // Suppress per-file tsgo linting when workspace_mode is active; rely on batch results. + if tsgolint { + if !self.workspace_mode { + if let Some(tsgo_linter) = self.tsgo_linter.as_ref() { + let t0 = Some(Instant::now()); + let res = tsgo_linter.lint_file(uri, content.clone()); + info!( + "[tsgo] single run uri={} diagnostics={}", + uri.as_str(), + res.as_ref().map(|v| v.len()).unwrap_or(0) + ); + if let Some(t0) = t0 { + debug!( + "[profile] tsgo single uri={:?} ms={} diagnostics={}", + uri, + t0.elapsed().as_millis(), + res.as_ref().map(|v| v.len()).unwrap_or(0) + ); + } + self.diagnostics.tsgo_linter.pin().insert(uri.to_string(), res); + } + } else { + debug!("[tsgo] single suppressed (workspace_mode) uri={}", uri.as_str()); + } } self.diagnostics.get_diagnostics(&uri.to_string()) } + /// Cache isolated (oxlint) diagnostics for a file. + pub fn cache_isolated_diagnostics( + &self, + uri: &Uri, + diagnostics: Option>, + ) { + self.diagnostics.isolated_linter.pin().insert(uri.to_string(), diagnostics); + } + + /// Cache tsgo (type-aware) diagnostics for a file. + pub fn cache_tsgo_diagnostics(&self, uri: &Uri, diagnostics: Option>) { + self.diagnostics.tsgo_linter.pin().insert(uri.to_string(), diagnostics); + } + + /// Batch run isolated linter across multiple paths using parallel runtime. + /// Does not include tsgo/type-aware diagnostics (falls back to sequential path when needed). 
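+    /// The isolated handler is guarded by a `tokio::sync::Mutex`, so concurrent callers are
+    /// serialized and at most one workspace batch runs at a time.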
+ pub async fn run_workspace_isolated( + &self, + paths: &[PathBuf], + ) -> Vec<(Uri, Vec)> { + use tokio::sync::MutexGuard; + + let t0 = Instant::now(); + let mut handler: MutexGuard<'_, IsolatedLintHandler> = self.isolated_linter.lock().await; + let out = handler.run_workspace(paths); + debug!("[profile] isolated batch files={} ms={}", paths.len(), t0.elapsed().as_millis()); + out + } + pub fn needs_restart(old_options: &LSPLintOptions, new_options: &LSPLintOptions) -> bool { old_options.config_path != new_options.config_path || old_options.ts_config_path != new_options.ts_config_path @@ -378,6 +517,8 @@ impl ServerLinter { || old_options.unused_disable_directives != new_options.unused_disable_directives // TODO: only the TsgoLinter needs to be dropped or created || old_options.type_aware != new_options.type_aware + // workspace-wide linting mode impacts watcher setup and cached diagnostics strategy + || old_options.workspace_mode != new_options.workspace_mode } pub fn get_watch_patterns(&self, options: &LSPLintOptions, root_path: &Path) -> Vec { diff --git a/crates/oxc_language_server/src/linter/tsgo_linter.rs b/crates/oxc_language_server/src/linter/tsgo_linter.rs index 8735467755cd9..5f3a8854e12f5 100644 --- a/crates/oxc_language_server/src/linter/tsgo_linter.rs +++ b/crates/oxc_language_server/src/linter/tsgo_linter.rs @@ -1,14 +1,16 @@ use std::{ - path::Path, + path::{Path, PathBuf}, sync::{Arc, OnceLock}, }; +use log::debug; use oxc_data_structures::rope::Rope; use oxc_linter::{ ConfigStore, LINTABLE_EXTENSIONS, TsGoLintState, loader::LINT_PARTIAL_LOADER_EXTENSIONS, read_to_string, }; use rustc_hash::FxHashSet; +use std::time::Instant; use tower_lsp_server::{UriExt, lsp_types::Uri}; use crate::linter::error_with_position::{ @@ -36,8 +38,9 @@ impl TsgoLinter { let rope = Rope::from_str(&source_text); // TODO: Avoid cloning the source text - let messages = - self.state.lint_source(&Arc::from(path.as_os_str()), source_text.clone()).ok()?; + let t0 = Instant::now(); + let messages_group = self.state.lint_source(&[Arc::from(path.as_os_str())]).ok()?; + let messages = messages_group.into_iter().next().map(|(_, v)| v)?; let mut diagnostics: Vec = messages .iter() @@ -47,6 +50,12 @@ impl TsgoLinter { let mut inverted_diagnostics = generate_inverted_diagnostics(&diagnostics, uri); diagnostics.append(&mut inverted_diagnostics); + debug!( + "[profile] tsgo single internal uri={:?} diagnostics={} ms={}", + uri, + diagnostics.len(), + t0.elapsed().as_millis() + ); Some(diagnostics) } @@ -64,4 +73,86 @@ impl TsgoLinter { .and_then(std::ffi::OsStr::to_str) .is_some_and(|ext| wanted_exts.contains(ext)) } + + /// Batch lint multiple URIs using a single tsgolint invocation. + /// Returns vector of (Uri, DiagnosticReport) for each file with diagnostics. + pub fn lint_batch(&self, uris: &[Uri]) -> Vec<(Uri, Vec)> { + // Prepare eligible paths (filter out unsupported extensions early). + let mut path_map: Vec<(Uri, PathBuf)> = Vec::with_capacity(uris.len()); + for uri in uris { + if let Some(p) = uri.to_file_path() { + let owned = p.into_owned(); + if Self::should_lint_path(&owned) { + path_map.push((uri.clone(), owned)); + } + } + } + + if path_map.is_empty() { + return Vec::new(); + } + + let arcs: Vec> = + path_map.iter().map(|(_, p)| Arc::from(p.as_os_str())).collect(); + // Collect simple metrics before invoking lint_source. 
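+        // (The byte total and per-extension counts only feed the log lines below; they do not
+        // influence which files are linted.)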
+ let mut total_bytes: u64 = 0; + let mut ext_counts: std::collections::HashMap = + std::collections::HashMap::new(); + for (_, p) in &path_map { + if let Ok(meta) = std::fs::metadata(p) { + total_bytes += meta.len(); + } + if let Some(ext) = p.extension().and_then(std::ffi::OsStr::to_str) { + *ext_counts.entry(ext.to_string()).or_insert(0) += 1; + } + } + let t0 = Instant::now(); + log::info!( + "[tsgo] internal batch start eligible_paths={} total_bytes={} ext_stats={:?}", + arcs.len(), + total_bytes, + ext_counts + ); + let t_invoke = Instant::now(); + let batch_result = self.state.lint_source(&arcs); + let invoke_ms = t_invoke.elapsed().as_millis(); + let mut out = Vec::new(); + + match batch_result { + Ok(grouped) => { + for (path_buf, messages) in grouped.into_iter() { + // Map back to Uri by matching file path string + if let Some((uri, _)) = + path_map.iter().find(|(_, original)| original == &path_buf) + { + if let Ok(source_text) = read_to_string(&path_buf) { + let rope = Rope::from_str(&source_text); + let mut diagnostics: Vec = messages + .iter() + .map(|m| message_to_lsp_diagnostic(m, uri, &source_text, &rope)) + .collect(); + let mut inverted = generate_inverted_diagnostics(&diagnostics, uri); + diagnostics.append(&mut inverted); + out.push((uri.clone(), diagnostics)); + } + } + } + } + Err(_err) => { + // On failure, degrade gracefully: no batch diagnostics (caller may fall back to sequential path). + } + } + + // Deterministic ordering + out.sort_unstable_by(|a, b| a.0.as_str().cmp(b.0.as_str())); + debug!( + "[tsgo] internal batch done eligible_paths={} produced={} elapsed_ms_total={} invoke_ms={} total_bytes={}", + path_map.len(), + out.len(), + t0.elapsed().as_millis(), + invoke_ms, + total_bytes + ); + out + } } diff --git a/crates/oxc_language_server/src/log_bridge.rs b/crates/oxc_language_server/src/log_bridge.rs new file mode 100644 index 0000000000000..25ff611f3020f --- /dev/null +++ b/crates/oxc_language_server/src/log_bridge.rs @@ -0,0 +1,75 @@ +use std::sync::{OnceLock, RwLock}; +use tower_lsp_server::{Client, lsp_types::MessageType}; + +// Global LSP client used for window/logMessage forwarding. We allow replacement +// on language server restarts. OnceLock guards initialization of the RwLock itself. +static GLOBAL_CLIENT: OnceLock>> = OnceLock::new(); + +fn client_cell() -> &'static RwLock> { + GLOBAL_CLIENT.get_or_init(|| RwLock::new(None)) +} + +/// Set or replace the global client (called from Backend::new on each start/restart). +pub fn set_global_client(client: Client) { + *client_cell().write().unwrap() = Some(client); +} + +/// Get a clone of the current client for sending notifications. +pub fn get_global_client() -> Option { + client_cell().read().unwrap().as_ref().cloned() +} + +// Global logger wrapper forwarding ALL log crate emissions to LSP window/logMessage. +pub struct LspForwardingLogger { + fallback: env_logger::Logger, +} + +impl log::Log for LspForwardingLogger { + fn enabled(&self, metadata: &log::Metadata) -> bool { + self.fallback.enabled(metadata) + } + + fn log(&self, record: &log::Record) { + if !self.enabled(record.metadata()) { + return; + } + // Try forwarding to LSP client if initialized. + if let Some(client) = get_global_client() { + let level = match record.level() { + log::Level::Error => MessageType::ERROR, + log::Level::Warn => MessageType::WARNING, + log::Level::Info => MessageType::INFO, + _ => MessageType::LOG, + }; + // The tower_lsp_server Client API is async; calling log_message returns a lazy Future. 
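+            // (Rust futures are inert until polled, so the returned Future must be awaited or spawned.)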
+ // Previously we ignored it, so the notification was never actually sent. + // Spawn the future on the current Tokio runtime if available. If no runtime is + // present (e.g. in certain test contexts), we silently skip forwarding to avoid + // panicking. The fallback logger below still emits the message to stdout/stderr. + if let Ok(handle) = tokio::runtime::Handle::try_current() { + let msg = record.args().to_string(); + handle.spawn(async move { + client.log_message(level, msg).await; + }); + } + } + // Always emit via fallback (stdout/stderr formatting, filtering, etc.). + self.fallback.log(record); + } + + fn flush(&self) { + self.fallback.flush(); + } +} + +/// Initialize the global logger with LSP forwarding. Call exactly once at program start. +pub fn init_global_logger() { + // Build env_logger using environment configuration (RUST_LOG, etc.) + let mut builder = env_logger::Builder::from_env(env_logger::Env::default()); + let fallback = builder.build(); + // Install composite logger. Ignore error if someone already set a logger (tests). + if log::set_boxed_logger(Box::new(LspForwardingLogger { fallback })).is_ok() { + // Allow logger's internal filter to decide; pass everything through log crate. + log::set_max_level(log::LevelFilter::Trace); + } +} diff --git a/crates/oxc_language_server/src/main.rs b/crates/oxc_language_server/src/main.rs index 72722db4fcca9..f505ac66ab85b 100644 --- a/crates/oxc_language_server/src/main.rs +++ b/crates/oxc_language_server/src/main.rs @@ -1,4 +1,8 @@ use rustc_hash::FxBuildHasher; +#[cfg(unix)] +use std::path::PathBuf; +#[cfg(unix)] +use tokio::net::UnixListener; use tower_lsp_server::{LspService, Server}; mod backend; @@ -8,6 +12,7 @@ mod commands; mod file_system; mod formatter; mod linter; +mod log_bridge; mod options; #[cfg(test)] mod tester; @@ -23,8 +28,71 @@ const FORMAT_CONFIG_FILES: &[&str; 2] = &[".oxfmtrc.json", ".oxfmtrc.jsonc"]; #[tokio::main] async fn main() { - env_logger::init(); + // Initialize composite logger that forwards log crate events to LSP (client set later). + crate::log_bridge::init_global_logger(); + // Optional external listen mode for debugger / external host integration. + // Activate by setting env var: OXC_LS_LISTEN=unix:/tmp/oxc_ls.sock + #[cfg(unix)] + if let Ok(listen_spec) = std::env::var("OXC_LS_LISTEN") { + if let Some(path) = listen_spec.strip_prefix("unix:") { + // Remove stale socket file (ignore errors) to avoid bind failures. + let _ = std::fs::remove_file(path); + match UnixListener::bind(path) { + Ok(listener) => { + // Guard to remove the socket file when the process exits (listener dropped). + struct SocketCleanup(PathBuf); + impl Drop for SocketCleanup { + fn drop(&mut self) { + // Best-effort cleanup; ignore errors. + let _ = std::fs::remove_file(&self.0); + } + } + let _cleanup = SocketCleanup(PathBuf::from(path)); + + eprintln!("[oxc-language-server] Listening on unix socket: {path}"); + + // Accept loop: allow sequential LSP sessions (e.g., VSCode client.restart()) + // without requiring an external supervisor to recreate the server. + // Each iteration runs a full LSP lifecycle (initialize -> ... -> shutdown/exit). + // The VSCode extension sends explicit shutdown/exit before a restart when + // OXC_LS_CONNECT is set. After serve() returns we immediately accept the next + // connection. This prevents connection refusal races previously observed when + // the server exited entirely and the client retried before a new process was + // spawned. 
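+                    //
+                    // Typical wiring (mirroring .vscode/launch.json in this repo): the server is
+                    // started with OXC_LS_LISTEN=unix:/tmp/oxc_ls.sock, while the extension host
+                    // is launched with OXC_LS_CONNECT pointing at the same socket path.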
+ loop { + let (stream, _addr) = match listener.accept().await { + Ok(v) => v, + Err(err) => { + eprintln!( + "[oxc-language-server] Accept error: {err}. Shutting down accept loop." + ); + break; // exit to stdio fallback? we already bound unix, so just end. + } + }; + + eprintln!("[oxc-language-server] Client connected. Starting LSP session."); + let (service, socket) = LspService::build(Backend::new).finish(); + let (read_half, write_half) = tokio::io::split(stream); + Server::new(read_half, write_half, socket).serve(service).await; + eprintln!( + "[oxc-language-server] LSP session ended. Waiting for next client..." + ); + // Loop continues to accept next connection (e.g., after a client restart). + } + // After breaking from loop (e.g., accept error), exit main. + return; + } + Err(err) => { + eprintln!( + "[oxc-language-server] Failed to bind unix socket {path}: {err}. Falling back to stdio." + ); + } + } + } + } + + // Fallback stdio mode (default when no listen spec provided) let stdin = tokio::io::stdin(); let stdout = tokio::io::stdout(); diff --git a/crates/oxc_language_server/src/options.rs b/crates/oxc_language_server/src/options.rs index a2a4d582acaed..f7e2ba3f64218 100644 --- a/crates/oxc_language_server/src/options.rs +++ b/crates/oxc_language_server/src/options.rs @@ -3,13 +3,25 @@ use tower_lsp_server::lsp_types::Uri; use crate::{formatter::options::FormatOptions, linter::options::LintOptions}; -#[derive(Debug, Default, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Options { #[serde(flatten)] pub lint: LintOptions, #[serde(flatten)] pub format: FormatOptions, + #[serde(default)] + pub supported_extensions: Vec, +} + +impl Default for Options { + fn default() -> Self { + Self { + lint: LintOptions::default(), + format: FormatOptions::default(), + supported_extensions: Vec::new(), + } + } } #[derive(Debug, Serialize, Deserialize, Clone)] diff --git a/crates/oxc_language_server/src/worker.rs b/crates/oxc_language_server/src/worker.rs index e3dcaabb55aa6..94c3eaaaeb49b 100644 --- a/crates/oxc_language_server/src/worker.rs +++ b/crates/oxc_language_server/src/worker.rs @@ -1,5 +1,7 @@ -use log::debug; +use log::{debug, info}; use serde_json::json; +use std::path::PathBuf; +use std::sync::{Arc, Mutex as StdMutex}; use tokio::sync::{Mutex, RwLock}; use tower_lsp_server::{ UriExt, @@ -478,12 +480,282 @@ impl WorkspaceWorker { })), }); } + + // Handle workspace_mode transitions as part of restart logic + if current_option.lint.workspace_mode != changed_options.lint.workspace_mode { + if changed_options.lint.workspace_mode { + // Register source file watcher and perform initial scan + if let Some(source_registration) = self.init_source_file_watchers().await { + registrations.push(source_registration); + } + let workspace_diagnostics = self.lint_workspace().await; + if !workspace_diagnostics.is_empty() { + if let Some(existing) = &mut diagnostics { + existing.extend(workspace_diagnostics); + } else { + diagnostics = Some(workspace_diagnostics); + } + } + } else { + // Unregister source file watcher when disabling + unregistrations.push(Unregistration { + id: format!("watcher-source-files-{}", self.root_uri.as_str()), + method: "workspace/didChangeWatchedFiles".to_string(), + }); + // Optionally could clear diagnostics for non-open files here (future enhancement) + } + } } (diagnostics, registrations, unregistrations, formatting) } + + /// Lint all workspace files and return diagnostics + pub async fn 
lint_workspace(&self) -> Vec<(String, Vec)> { + // Acquire root path; if invalid, no work to do. + let Some(root_path) = self.root_uri.to_file_path() else { + return vec![]; + }; + + let t_overall = std::time::Instant::now(); + + // Acquire supported extensions from options + let supported_extensions: Vec = { + let guard = self.options.lock().await; + guard.as_ref().map(|o| o.supported_extensions.clone()).unwrap_or_default() + }; + + if supported_extensions.is_empty() { + return vec![]; + } + + // Build a types matcher (same logic as scan_workspace_files) but inline to avoid an extra sequential pass. + use ignore::types::TypesBuilder; + let all_simple = supported_extensions.iter().all(|ext| !ext.contains('.')); + let mut builder = TypesBuilder::new(); + if all_simple { + let glob = format!("*.{{{}}}", supported_extensions.join(",")); + if let Err(err) = builder.add("oxc", &glob) { + debug!("failed to add consolidated glob {glob}: {err}"); + } + builder.select("oxc"); + } else { + for ext in &supported_extensions { + let pattern = format!("*.{}", ext); + if let Err(err) = builder.add(ext, &pattern) { + debug!("failed to add type pattern {pattern}: {err}"); + } else { + builder.select(ext); + } + } + } + let types_matcher = match builder.build() { + Ok(m) => m, + Err(err) => { + debug!("failed to build types matcher: {err}"); + return vec![]; + } + }; + + // Shared collection for file URIs discovered in parallel. + let collected: Arc>> = Arc::new(StdMutex::new(Vec::new())); + + let mut walk_builder = ignore::WalkBuilder::new(&root_path); + walk_builder.hidden(false).git_ignore(true).types(types_matcher); + + // Parallel directory traversal. Each thread pushes file URIs into the shared vector. + walk_builder.build_parallel().run({ + let collected = Arc::clone(&collected); + move || { + let collected = Arc::clone(&collected); + Box::new(move |entry: Result| { + if let Ok(e) = entry { + if e.file_type().is_some_and(|ft| ft.is_file()) { + if let Some(uri) = Uri::from_file_path(e.path()) { + if let Ok(mut vec) = collected.lock() { + vec.push(uri); + } + } + } + } + ignore::WalkState::Continue + }) + } + }); + + // Extract, sort deterministically. + let mut files: Vec = match collected.lock() { + Ok(guard) => guard.clone(), + Err(poisoned) => { + log::warn!( + "Poisoned mutex encountered while collecting workspace files. Recovering inner value." + ); + poisoned.into_inner().clone() + } + }; + files.sort_unstable_by(|a, b| a.as_str().cmp(b.as_str())); + debug!( + "[profile] workspace scan root={} files={} ms={}", + root_path.display(), + files.len(), + t_overall.elapsed().as_millis() + ); + + // If type-aware linting is active, fall back to per-file path to preserve tsgo diagnostics. 
+ let server_linter_guard = self.server_linter.read().await; + let Some(server_linter) = server_linter_guard.as_ref() else { return vec![] }; + + if let Some(tsgo_linter) = server_linter.tsgo_linter.as_ref() { + use std::collections::HashMap; + // Filter ignored paths first (matching isolated behavior) + let eligible_uris: Vec = + files.into_iter().filter(|u| !server_linter.is_ignored(u)).collect(); + info!( + "[tsgo] batch start root={} eligible={} (pre-extension-filter)", + root_path.display(), + eligible_uris.len() + ); + let t_tsgo = std::time::Instant::now(); + let tsgo_batch = tsgo_linter.lint_batch(&eligible_uris); + info!( + "[tsgo] batch done root={} produced={} elapsed_ms={}", + root_path.display(), + tsgo_batch.len(), + t_tsgo.elapsed().as_millis() + ); + debug!( + "[profile] tsgo batch eligible={} produced={} ms={}", + eligible_uris.len(), + tsgo_batch.len(), + t_tsgo.elapsed().as_millis() + ); + + // Collect tsgo diagnostics first + let mut merged: HashMap> = + HashMap::with_capacity(tsgo_batch.len()); + for (uri, reports) in tsgo_batch.into_iter() { + server_linter.cache_tsgo_diagnostics(&uri, Some(reports.clone())); + let key = uri.to_string(); + let diags: Vec = reports.into_iter().map(|r| r.diagnostic).collect(); + merged.entry(key).or_default().extend(diags); + } + + // Run isolated workspace lint (regular JS/TS rules excluding tsgo-specific ones) + let isolated_paths: Vec = eligible_uris + .iter() + .filter_map(|u| u.to_file_path().map(|p| p.into_owned())) + .collect(); + let t_isolated = std::time::Instant::now(); + let isolated_batch = server_linter.run_workspace_isolated(&isolated_paths).await; + debug!( + "[profile] isolated (with tsgo) eligible={} produced={} ms={}", + isolated_paths.len(), + isolated_batch.len(), + t_isolated.elapsed().as_millis() + ); + for (uri, reports) in isolated_batch.into_iter() { + server_linter.cache_isolated_diagnostics(&uri, Some(reports.clone())); + let key = uri.to_string(); + merged.entry(key).or_default().extend(reports.into_iter().map(|r| r.diagnostic)); + } + + // Stabilize ordering and positions + let mut out: Vec<(String, Vec)> = merged + .into_iter() + .map(|(uri, mut diags)| { + diags.sort_by(|a, b| { + ( + a.range.start.line, + a.range.start.character, + a.range.end.line, + a.range.end.character, + ) + .cmp(&( + b.range.start.line, + b.range.start.character, + b.range.end.line, + b.range.end.character, + )) + }); + (uri, diags) + }) + .collect(); + out.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + debug!( + "[profile] workspace lint total (tsgo+isolated) ms={} merged_files={}", + t_overall.elapsed().as_millis(), + out.len() + ); + return out; + } + + // Batch isolated lint using runtime parallelism. + // Filter ignored paths first to match single-file behavior. 
+ let filtered_paths: Vec = files + .iter() + .filter(|u| !server_linter.is_ignored(u)) + .filter_map(|u| u.to_file_path().map(|p| p.into_owned())) + .collect(); + + let batch_reports = server_linter.run_workspace_isolated(&filtered_paths).await; + + let mut out: Vec<(String, Vec)> = Vec::with_capacity(batch_reports.len()); + for (uri, reports) in batch_reports.into_iter() { + // Cache isolated diagnostics (tsgo absent in this branch) + server_linter.cache_isolated_diagnostics(&uri, Some(reports.clone())); + out.push((uri.to_string(), reports.into_iter().map(|r| r.diagnostic).collect())); + } + debug!( + "[profile] isolated batch eligible={} produced={} ms={}", + filtered_paths.len(), + out.len(), + t_overall.elapsed().as_millis() + ); + out + } + + /// Register source file watchers for workspace mode + pub async fn init_source_file_watchers(&self) -> Option { + let options_guard = self.options.lock().await; + let lint_options = options_guard.as_ref().map(|o| &o.lint)?; + + if !lint_options.workspace_mode { + return None; + } + + let supported_extensions: Vec = + options_guard.as_ref().map(|o| o.supported_extensions.clone()).unwrap_or_default(); + + // Build glob patterns from supported extensions ("**/*.ext") + let source_patterns: Vec = + supported_extensions.iter().map(|ext| format!("**/*.{}", ext)).collect(); + + Some(Registration { + id: format!("watcher-source-files-{}", self.root_uri.as_str()), + method: "workspace/didChangeWatchedFiles".to_string(), + register_options: Some(json!(DidChangeWatchedFilesRegistrationOptions { + watchers: source_patterns + .into_iter() + .map(|pattern| FileSystemWatcher { + glob_pattern: GlobPattern::Relative(RelativePattern { + base_uri: OneOf::Right(self.root_uri.clone()), + pattern, + }), + kind: Some(WatchKind::all()), + }) + .collect::>(), + })), + }) + } + + /// Get the current options for this worker + pub async fn get_options(&self) -> Option { + self.options.lock().await.clone() + } } +// Removed legacy should_lint_file; TypesBuilder handles extension filtering. 
+ fn range_overlaps(a: Range, b: Range) -> bool { a.start <= b.end && a.end >= b.start } @@ -724,6 +996,7 @@ mod test_watchers { experimental: true, config_path: Some("configs/formatter.json".to_string()), }, + ..Default::default() }, ); let watchers = tester.init_watchers(); @@ -822,6 +1095,7 @@ mod test_watchers { ..Default::default() }, format: FormatOptions { experimental: true, ..Default::default() }, + ..Default::default() }); assert_eq!(unregistrations.len(), 1); diff --git a/crates/oxc_linter/src/fixer/mod.rs b/crates/oxc_linter/src/fixer/mod.rs index 8ad61e4322998..5ff28caf8baf2 100644 --- a/crates/oxc_linter/src/fixer/mod.rs +++ b/crates/oxc_linter/src/fixer/mod.rs @@ -1,4 +1,6 @@ use std::borrow::Cow; +#[cfg(feature = "language_server")] +use std::{ffi::OsStr, sync::Arc}; use oxc_codegen::{Codegen, CodegenOptions}; use oxc_diagnostics::OxcDiagnostic; @@ -225,6 +227,8 @@ pub struct Message { fixed: bool, #[cfg(feature = "language_server")] pub section_offset: u32, + #[cfg(feature = "language_server")] + pub file_path: Arc, } impl Message { @@ -244,6 +248,8 @@ impl Message { fixed: false, #[cfg(feature = "language_server")] section_offset: 0, + #[cfg(feature = "language_server")] + file_path: Arc::from(OsStr::new("")), // will be set later via with_file_path } } @@ -254,6 +260,13 @@ impl Message { self } + #[cfg(feature = "language_server")] + #[must_use] + pub fn with_file_path(mut self, path: &Arc) -> Self { + self.file_path = Arc::clone(path); + self + } + /// move the offset of all spans to the right pub fn move_offset(&mut self, offset: u32) -> &mut Self { debug_assert!(offset != 0); diff --git a/crates/oxc_linter/src/service/runtime.rs b/crates/oxc_linter/src/service/runtime.rs index 075030920eba6..588de3dc39e91 100644 --- a/crates/oxc_linter/src/service/runtime.rs +++ b/crates/oxc_linter/src/service/runtime.rs @@ -611,10 +611,27 @@ impl Runtime { .to_mut() .replace_range(start..end, &fix_result.fixed_code); } - messages = fix_result.messages; + #[cfg(feature = "language_server")] + { + messages = fix_result + .messages + .into_iter() + .map(|m| m.with_file_path(&module_to_lint.path)) + .collect(); + } + #[cfg(not(feature = "language_server"))] + { + messages = fix_result.messages; + } } if !messages.is_empty() { + #[cfg(feature = "language_server")] + let errors = messages + .into_iter() + .map(|m| m.with_file_path(&module_to_lint.path).into()) + .collect(); + #[cfg(not(feature = "language_server"))] let errors = messages.into_iter().map(Into::into).collect(); let diagnostics = DiagnosticService::wrap_diagnostics( &me.cwd, @@ -652,6 +669,17 @@ impl Runtime { section_contents.len() ); + // Pre-compute section boundaries (start, end) for offset mapping before draining. 
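+            // With the bounds captured up front, each resulting Message's span.start can later be
+            // matched to the section that produced it (see the offset annotation further below).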
+            #[cfg(feature = "language_server")]
+            let section_bounds: Vec<(u32, u32)> = section_contents
+                .iter()
+                .map(|section| {
+                    let start = section.source.start;
+                    let end = start + section.source.source_text.len() as u32;
+                    (start, end)
+                })
+                .collect();
+
             let context_sub_hosts: Vec> = module_to_lint
                 .section_module_records
                 .into_iter()
@@ -668,9 +696,12 @@
                     Err(diagnostics) => {
                         if !diagnostics.is_empty() {
                             messages.lock().unwrap().extend(
-                                diagnostics.into_iter().map(|diagnostic| {
-                                    Message::new(diagnostic, PossibleFixes::None)
-                                }),
+                                diagnostics
+                                    .into_iter()
+                                    .map(|diagnostic| {
+                                        Message::new(diagnostic, PossibleFixes::None)
+                                            .with_file_path(&module_to_lint.path)
+                                    }),
                             );
                         }
                         None
@@ -694,8 +725,23 @@
                             .insert(path.to_path_buf(), disable_directives);
                     }
 
+                    #[cfg(feature = "language_server")]
+                    {
+                        let annotated = section_messages.into_iter().map(|m| {
+                            // Find section offset by span start.
+                            let section_offset = section_bounds
+                                .iter()
+                                .find_map(|(s, e)| {
+                                    if m.span.start >= *s && m.span.start < *e { Some(*s) } else { None }
+                                })
+                                .unwrap_or(0);
+                            m.with_file_path(&module_to_lint.path).with_section_offset(section_offset)
+                        });
+                        messages.lock().unwrap().extend(annotated);
+                    }
+                    // Without the feature, `with_file_path` does not exist; keep the plain extend.
+                    #[cfg(not(feature = "language_server"))]
                     messages.lock().unwrap().extend(
                         section_messages
                     );
                 },
             );
@@ -720,6 +766,16 @@
                 |allocator_guard, ModuleContentDependent { source_text: _, section_contents }| {
                     assert_eq!(module.section_module_records.len(), section_contents.len());
 
+                    #[cfg(feature = "language_server")]
+                    let section_bounds: Vec<(u32, u32)> = section_contents
+                        .iter()
+                        .map(|section| {
+                            let start = section.source.start;
+                            let end = start + section.source.source_text.len() as u32;
+                            (start, end)
+                        })
+                        .collect();
+
                     let context_sub_hosts: Vec> = module
                         .section_module_records
                         .into_iter()
@@ -733,13 +789,19 @@
                         )),
                         Err(errors) => {
                             if !errors.is_empty() {
-                                messages
-                                    .lock()
-                                    .unwrap()
-                                    .extend(errors
-                                        .into_iter()
-                                        .map(|err| Message::new(err, PossibleFixes::None))
-                                    );
+                                #[cfg(feature = "language_server")]
+                                messages.lock().unwrap().extend(
+                                    errors.into_iter().map(|err| {
+                                        Message::new(err, PossibleFixes::None)
+                                            .with_file_path(&module.path)
+                                    }),
+                                );
+                                #[cfg(not(feature = "language_server"))]
+                                messages.lock().unwrap().extend(
+                                    errors
+                                        .into_iter()
+                                        .map(|err| Message::new(err, PossibleFixes::None)),
+                                );
                             }
                             None
                         }
@@ -750,14 +812,26 @@
                         return;
                     }
 
-                    messages.lock().unwrap().extend(
-                        me.linter.run(
-                            Path::new(&module.path),
-                            context_sub_hosts,
-                            allocator_guard
-                        )
-                        ,
+                    let raw = me.linter.run(
+                        Path::new(&module.path),
+                        context_sub_hosts,
+                        allocator_guard,
                     );
+                    #[cfg(feature = "language_server")]
+                    {
+                        let annotated = raw.into_iter().map(|m| {
+                            let section_offset = section_bounds
+                                .iter()
+                                .find_map(|(s, e)| {
+                                    if m.span.start >= *s && m.span.start < *e { Some(*s) } else { None }
+                                })
+                                .unwrap_or(0);
+                            m.with_file_path(&module.path).with_section_offset(section_offset)
+                        });
+                        messages.lock().unwrap().extend(annotated);
+                    }
+                    #[cfg(not(feature = "language_server"))]
+                    messages.lock().unwrap().extend(raw);
                 },
             );
         });
diff --git a/crates/oxc_linter/src/tsgolint.rs b/crates/oxc_linter/src/tsgolint.rs
index 3ec888bd84a28..8bfaf8886d913 100644
--- a/crates/oxc_linter/src/tsgolint.rs
+++ b/crates/oxc_linter/src/tsgolint.rs
@@ -295,13 +295,16 @@ impl TsGoLintState {
     #[cfg(feature = "language_server")]
     pub fn lint_source(
         &self,
-        path: &Arc<OsStr>,
-        source_text: String,
-    ) -> Result<Vec<Message>, String> {
-        let mut resolved_configs: FxHashMap<PathBuf, ResolvedLinterState> = FxHashMap::default();
+        paths: &[Arc<OsStr>],
+    ) -> Result<Vec<(PathBuf, Vec<Message>)>, String> {
+        if paths.is_empty() {
+            return Ok(Vec::new());
+        }
-        let json_input = self.json_input(std::slice::from_ref(path), &mut resolved_configs);
+        let mut resolved_configs: FxHashMap<PathBuf, ResolvedLinterState> = FxHashMap::default();
+        let json_input = self.json_input(paths, &mut resolved_configs);
         let executable_path = self.executable_path.clone();
+        let silent = self.silent;
 
         let handler = std::thread::spawn(move || {
             let child = std::process::Command::new(&executable_path)
@@ -321,100 +324,103 @@
             };
 
             let mut stdin = child.stdin.take().expect("Failed to open tsgolint stdin");
-
-            // Write the input synchronously and handle BrokenPipe gracefully in case the child
-            // exits early and closes its stdin.
             let json = serde_json::to_string(&json_input).expect("Failed to serialize JSON");
             if let Err(e) = stdin.write_all(json.as_bytes()) {
-                // If the child closed stdin early, avoid crashing on SIGPIPE/BrokenPipe.
                 if e.kind() != ErrorKind::BrokenPipe {
                     return Err(format!("Failed to write to tsgolint stdin: {e}"));
                 }
             }
-            // Explicitly drop stdin to send EOF to the child.
             drop(stdin);
 
-            // Stream diagnostics as they are emitted, rather than waiting for all output
             let stdout = child.stdout.take().expect("Failed to open tsgolint stdout");
-
-            let stdout_handler = std::thread::spawn(move || -> Result<Vec<Message>, String> {
-                let msg_iter = TsGoLintMessageStream::new(stdout);
-
-                let mut result = vec![];
-
-                for msg in msg_iter {
-                    match msg {
-                        Ok(TsGoLintMessage::Error(err)) => {
-                            return Err(err.error);
-                        }
-                        Ok(TsGoLintMessage::Diagnostic(tsgolint_diagnostic)) => {
-                            let path = tsgolint_diagnostic.file_path.clone();
-                            let Some(resolved_config) = resolved_configs.get(&path) else {
-                                // If we don't have a resolved config for this path, skip it. We should always
-                                // have a resolved config though, since we processed them already above.
-                                continue;
-                            };
-
-                            let severity =
-                                resolved_config.rules.iter().find_map(|(rule, status)| {
-                                    if rule.name() == tsgolint_diagnostic.rule {
-                                        Some(*status)
-                                    } else {
-                                        None
-                                    }
-                                });
-                            let Some(severity) = severity else {
-                                // If the severity is not found, we should not report the diagnostic
-                                continue;
-                            };
-
-                            let mut message = Message::from_tsgo_lint_diagnostic(
-                                tsgolint_diagnostic,
-                                &source_text,
-                            );
-
-                            message.error.severity = if severity == AllowWarnDeny::Deny {
-                                Severity::Error
-                            } else {
-                                Severity::Warning
-                            };
-
-                            result.push(message);
-                        }
-                        Err(e) => {
-                            return Err(e);
+            let stdout_handler =
+                std::thread::spawn(move || -> Result<Vec<(PathBuf, Vec<Message>)>, String> {
+                    let msg_iter = TsGoLintMessageStream::new(stdout);
+                    let mut map: FxHashMap<PathBuf, Vec<Message>> = FxHashMap::default();
+                    let mut source_text_cache: FxHashMap<PathBuf, String> = FxHashMap::default();
+
+                    for msg in msg_iter {
+                        match msg {
+                            Ok(TsGoLintMessage::Error(err)) => return Err(err.error),
+                            Ok(TsGoLintMessage::Diagnostic(diag)) => {
+                                let path = diag.file_path.clone();
+                                let Some(resolved_config) = resolved_configs.get(&path) else {
+                                    continue;
+                                };
+                                let severity =
+                                    resolved_config.rules.iter().find_map(|(rule, status)| {
+                                        if rule.name() == diag.rule { Some(*status) } else { None }
+                                    });
+                                let Some(severity) = severity else { continue };
+
+                                // Read each file at most once; the entry API returns the cached
+                                // String without re-borrowing the map across the insert.
+                                let source_text: &str = if silent {
+                                    ""
+                                } else {
+                                    source_text_cache
+                                        .entry(path.clone())
+                                        .or_insert_with(|| match read_to_string(&path) {
+                                            Ok(content) => content,
+                                            Err(e) => {
+                                                eprintln!(
+                                                    "Failed to read file '{}': {}",
+                                                    path.display(),
+                                                    e
+                                                );
+                                                String::new()
+                                            }
+                                        })
+                                        .as_str()
+                                };
+
+                                let mut message =
+                                    Message::from_tsgo_lint_diagnostic(diag, source_text);
+                                message.error.severity = if severity == AllowWarnDeny::Deny {
+                                    Severity::Error
+                                } else {
+                                    Severity::Warning
+                                };
+                                map.entry(path).or_default().push(message);
+                            }
+                            Err(e) => return Err(e),
                         }
                     }
-                }
-                Ok(result)
-            });
+                    let mut entries: Vec<(PathBuf, Vec<Message>)> = map.into_iter().collect();
+                    entries.sort_unstable_by(|a, b| a.0.cmp(&b.0));
+                    for (_, msgs) in &mut entries {
+                        msgs.sort_unstable_by(|m1, m2| {
+                            let s1 = m1.span;
+                            let s2 = m2.span;
+                            s1.start.cmp(&s2.start).then(s1.end.cmp(&s2.end))
+                        });
+                    }
+                    Ok(entries)
+                });
 
-            // Wait for process to complete and stdout processing to finish
             let exit_status = child.wait().expect("Failed to wait for tsgolint process");
             let stdout_result = stdout_handler.join();
-
             if !exit_status.success() {
                 return Err(format!("tsgolint process exited with status: {exit_status}"));
             }
-
             match stdout_result {
-                Ok(Ok(diagnostics)) => Ok(diagnostics),
+                Ok(Ok(grouped)) => Ok(grouped),
                 Ok(Err(err)) => Err(err),
                 Err(_) => Err("Failed to join stdout processing thread".to_string()),
             }
         });
 
         match handler.join() {
-            Ok(Ok(diagnostics)) => {
-                // Successfully ran tsgolint
-                Ok(diagnostics)
-            }
+            Ok(Ok(res)) => Ok(res),
             Ok(Err(err)) => Err(format!("Error running tsgolint: {err:?}")),
             Err(err) => Err(format!("Error running tsgolint: {err:?}")),
         }
     }
 
+    /// Batch lint multiple paths returning messages grouped per file.
+    /// Falls back to an error (caller should degrade to sequential) if process spawn or streaming fails.
+    #[cfg(feature = "language_server")]
+    /// Create a JSON input for STDIN of tsgolint in this format:
     ///
     /// ```json
diff --git a/editors/vscode/client/WorkspaceConfig.ts b/editors/vscode/client/WorkspaceConfig.ts
index ae53842396876..647d8cfee5003 100644
--- a/editors/vscode/client/WorkspaceConfig.ts
+++ b/editors/vscode/client/WorkspaceConfig.ts
@@ -75,6 +75,13 @@ export interface WorkspaceConfigInterface {
    * `oxc.fmt.configPath`
    */
   ['fmt.configPath']?: string | null;
+  /**
+   * Enable workspace-wide linting (scan all files, not just opened ones)
+   * `oxc.lint.workspaceMode`
+   *
+   * @default false
+   */
+  workspaceMode?: boolean;
 }
 
 export class WorkspaceConfig {
@@ -86,6 +93,7 @@ export class WorkspaceConfig {
   private _flags: Record<string, string> = {};
   private _formattingExperimental: boolean = false;
   private _formattingConfigPath: string | null = null;
+  private _workspaceMode: boolean = false;
 
   constructor(private readonly workspace: WorkspaceFolder) {
     this.refresh();
@@ -109,6 +117,7 @@
     this._flags = flags;
     this._formattingExperimental = this.configuration.get('fmt.experimental') ?? false;
     this._formattingConfigPath = this.configuration.get('fmt.configPath') ?? null;
+    this._workspaceMode = this.configuration.get('lint.workspaceMode') ?? false;
   }
 
   public effectsConfigChange(event: ConfigurationChangeEvent): boolean {
@@ -136,6 +145,9 @@
     if (event.affectsConfiguration(`${ConfigService.namespace}.fmt.configPath`, this.workspace)) {
       return true;
     }
+    if (event.affectsConfiguration(`${ConfigService.namespace}.lint.workspaceMode`, this.workspace)) {
+      return true;
+    }
 
     return false;
   }
@@ -215,6 +227,15 @@
     return this.configuration.update('fmt.configPath', value, ConfigurationTarget.WorkspaceFolder);
   }
 
+  get workspaceMode(): boolean {
+    return this._workspaceMode;
+  }
+
+  updateWorkspaceMode(value: boolean): PromiseLike<void> {
+    this._workspaceMode = value;
+    return this.configuration.update('lint.workspaceMode', value, ConfigurationTarget.WorkspaceFolder);
+  }
+
   public toLanguageServerConfig(): WorkspaceConfigInterface {
     return {
       run: this.runTrigger,
@@ -225,6 +246,7 @@
       flags: this.flags,
       ['fmt.experimental']: this.formattingExperimental,
      ['fmt.configPath']: this.formattingConfigPath ?? null,
+      workspaceMode: this._workspaceMode,
     };
   }
 }
diff --git a/editors/vscode/client/extension.ts b/editors/vscode/client/extension.ts
index 082f56bb506a0..6362d0114efcb 100644
--- a/editors/vscode/client/extension.ts
+++ b/editors/vscode/client/extension.ts
@@ -16,9 +16,11 @@ import {
   ExecuteCommandRequest,
   MessageType,
   ShowMessageNotification,
+  State,
 } from 'vscode-languageclient';
-import { Executable, LanguageClient, LanguageClientOptions, ServerOptions } from 'vscode-languageclient/node';
+import { Executable, LanguageClient, LanguageClientOptions, ServerOptions, StreamInfo } from 'vscode-languageclient/node';
+import * as net from 'node:net';
 import { join } from 'node:path';
 
 import { ConfigService } from './ConfigService';
@@ -28,6 +30,8 @@ const languageClientName = 'oxc';
 const outputChannelName = 'Oxc';
 const commandPrefix = 'oxc';
 
+const RESTART_DELAY_MS = 50;
+
 const enum OxcCommands {
   RestartServer = `${commandPrefix}.restartServer`,
   ApplyAllFixesFile = `${commandPrefix}.applyAllFixesFile`,
@@ -59,17 +63,36 @@ export async function activate(context: ExtensionContext) {
       return;
     }
 
-    try {
-      if (client.isRunning()) {
-        await client.restart();
-        window.showInformationMessage('oxc server restarted.');
-      } else {
-        await client.start();
+      try {
+
+        const state = (client as LanguageClient)?.state;
+        if (state === State.Starting) {
+          window.showWarningMessage('oxc server is still starting; try restart again in a moment.');
+          return;
+        }
+
+        if (client.isRunning()) {
+          const externalSocketSpec = process.env.OXC_LS_CONNECT;
+          // Use stop()+start() instead of restart() to avoid shutdown while starting error.
+          if (externalSocketSpec) {
+            // External socket: stop sends shutdown/exit internally; wait a tick for server loop.
+            await client.stop();
+            await new Promise(r => setTimeout(r, RESTART_DELAY_MS));
+            await client.start();
+          } else {
+            // Spawned process: restart() is sufficient, but guard against transitional state.
+            await client.restart();
+          }
+          window.showInformationMessage('oxc server restarted.');
+        } else {
+          // Not running (stopped) -> start it.
+          await client.start();
+        }
+      } catch (err) {
+        client.error('Restarting client failed', err, 'force');
       }
-    } catch (err) {
-      client.error('Restarting client failed', err, 'force');
-    }
-  });
+    },
+  );
 
   const showOutputCommand = commands.registerCommand(OxcCommands.ShowOutputChannel, () => {
     client?.outputChannel?.show();
@@ -128,41 +151,105 @@ export async function activate(context: ExtensionContext) {
   );
 
   async function findBinary(): Promise<string> {
-    let bin = configService.getUserServerBinPath();
-    if (workspace.isTrusted && bin) {
+    // 1. User configured path
+    const userBin = configService.getUserServerBinPath();
+    if (workspace.isTrusted && userBin) {
       try {
-        await fsPromises.access(bin);
-        return bin;
+        await fsPromises.access(userBin);
+        outputChannel.info(`Using user configured oxc_language_server: ${userBin}`);
+        return userBin;
       } catch (e) {
-        outputChannel.error(`Invalid bin path: ${bin}`, e);
+        outputChannel.error(`Configured oxc.path.server not accessible: ${userBin}`, e);
       }
     }
+
     const ext = process.platform === 'win32' ? '.exe' : '';
     // NOTE: The `./target/release` path is aligned with the path defined in .github/workflows/release_vscode.yml
-    return process.env.SERVER_PATH_DEV ?? join(context.extensionPath, `./target/release/oxc_language_server${ext}`);
+
+    const releaseCandidate = join(context.extensionPath, `./target/release/oxc_language_server${ext}`);
+    const debugCandidate = join(context.extensionPath, `./target/debug/oxc_language_server${ext}`);
+    const envCandidate = process.env.SERVER_PATH_DEV;
+
+    const candidates = [envCandidate, releaseCandidate, debugCandidate].filter(Boolean) as string[];
+    for (const candidate of candidates) {
+      try {
+        await fsPromises.access(candidate);
+        outputChannel.info(`Using detected oxc_language_server: ${candidate}`);
+        return candidate;
+      } catch {
+        // continue
+      }
+    }
+
+    outputChannel.error(
+      `No oxc_language_server binary found. Tried: ${candidates.join(', ')}\n` +
+        'Build one with: pnpm run server:build:release (or server:build:debug) in editors/vscode.'
+    );
+    // Return release path as last resort (will still fail fast, but message is logged)
+    return releaseCandidate;
   }
 
-  const command = await findBinary();
-  const run: Executable = {
-    command: command!,
-    options: {
-      env: {
-        ...process.env,
-        RUST_LOG: process.env.RUST_LOG || 'info',
+  // External socket mode: if OXC_LS_CONNECT is set, connect instead of spawning.
+  const externalSocketSpec = process.env.OXC_LS_CONNECT;
+  let serverOptions: ServerOptions;
+  if (externalSocketSpec) {
+    const socketPath = externalSocketSpec.replace(/^unix:/, '');
+    outputChannel.info(`Connecting to external oxc_language_server socket: ${socketPath}`);
+    // Retry logic: attempt to connect several times with exponential backoff to avoid race condition.
+    const maxAttempts = 8;
+    const baseDelayMs = 75;
+    serverOptions = () => new Promise<StreamInfo>((resolve, reject) => {
+      let attempt = 0;
+      const tryConnect = () => {
+        attempt += 1;
+        const socket = net.createConnection(socketPath, () => {
+          outputChannel.info(`Connected to external language server after ${attempt} attempt(s).`);
+          resolve({ reader: socket, writer: socket });
+        });
+        socket.on('error', (err) => {
+          socket.destroy();
+          if (attempt < maxAttempts) {
+            const delay = baseDelayMs * (2 ** (attempt - 1));
+            outputChannel.info(`Language server not ready (attempt ${attempt}/${maxAttempts}). Retrying in ${delay}ms...`);
+            setTimeout(tryConnect, delay);
+          } else {
+            outputChannel.error(`Failed to connect to external language server after ${maxAttempts} attempts at ${socketPath}`, err);
+            reject(err);
+          }
+        });
+      };
+      tryConnect();
+    });
+  } else {
+    const command = await findBinary();
+    const run: Executable = {
+      command: command!,
+      options: {
+        env: {
+          ...process.env,
+          RUST_LOG: process.env.RUST_LOG || 'info',
+        },
       },
-    },
-  };
-  const serverOptions: ServerOptions = {
-    run,
-    debug: run,
-  };
+    };
+    serverOptions = {
+      run,
+      debug: run,
+    };
+  }
 
   // see https://github.com/oxc-project/oxc/blob/9b475ad05b750f99762d63094174be6f6fc3c0eb/crates/oxc_linter/src/loader/partial_loader/mod.rs#L17-L20
+  // This list is also sent to the language server to avoid hard-coded duplication of extensions
+  // for workspace scanning & source file watchers.
   const supportedExtensions = ['astro', 'cjs', 'cts', 'js', 'jsx', 'mjs', 'mts', 'svelte', 'ts', 'tsx', 'vue'];
 
-  // If the extension is launched in debug mode then the debug server options are used
-  // Otherwise the run options are used
-  // Options to control the language client
+  // Helper to augment workspace configuration entries with supportedExtensions.
+ type WorkspaceConfigEntry = (typeof configService.languageServerConfig)[number]; + const withSupportedExtensions = (workspaces: WorkspaceConfigEntry[]) => + workspaces.map((ws: WorkspaceConfigEntry) => ({ + ...ws, + options: { ...ws.options, supportedExtensions }, + })); + let clientOptions: LanguageClientOptions = { // Register the server for plain text documents documentSelector: [ @@ -171,7 +258,7 @@ export async function activate(context: ExtensionContext) { scheme: 'file', }, ], - initializationOptions: configService.languageServerConfig, + initializationOptions: withSupportedExtensions(configService.languageServerConfig), outputChannel, traceOutputChannel: outputChannel, middleware: { @@ -254,12 +341,12 @@ export async function activate(context: ExtensionContext) { return; } - // update the initializationOptions for a possible restart - client.clientOptions.initializationOptions = this.languageServerConfig; + // update the initializationOptions for a possible restart (keep them augmented) + client.clientOptions.initializationOptions = withSupportedExtensions(this.languageServerConfig); if (configService.effectsWorkspaceConfigChange(event) && client.isRunning()) { await client.sendNotification('workspace/didChangeConfiguration', { - settings: this.languageServerConfig, + settings: withSupportedExtensions(configService.languageServerConfig), }); } }; diff --git a/editors/vscode/package.json b/editors/vscode/package.json index 83d11118c24bd..100dd388dbd7d 100644 --- a/editors/vscode/package.json +++ b/editors/vscode/package.json @@ -74,6 +74,13 @@ "default": "onType", "description": "Run the linter on save (onSave) or on type (onType)" }, + "oxc.lint.workspaceMode": { + "scope": "resource", + "type": "boolean", + "default": false, + "description": "Enable workspace-wide linting. When enabled, all files in the workspace will be analyzed, not just open files.", + "tags": ["experimental"] + }, "oxc.enable": { "type": "boolean", "default": true, @@ -157,7 +164,8 @@ "type": "boolean", "scope": "resource", "default": false, - "description": "Enable experimental formatting support. This feature is experimental and might not work as expected." + "description": "Enable experimental formatting support. This feature is experimental and might not work as expected.", + "tags": ["experimental"] }, "oxc.fmt.configPath": { "type": [ diff --git a/external/tsgolint b/external/tsgolint new file mode 160000 index 0000000000000..1a61b63a8b59d --- /dev/null +++ b/external/tsgolint @@ -0,0 +1 @@ +Subproject commit 1a61b63a8b59d6f76269bc32d8a9254cf88751c1
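For reviewers who want to exercise the new setting end to end, the `oxc.lint.workspaceMode` contribution added to editors/vscode/package.json above is resource-scoped, so it can be enabled per workspace folder. A minimal sketch of a workspace `.vscode/settings.json` (file path assumed for illustration, not part of this diff):

    {
      // Hypothetical example: opt this folder into the experimental workspace-wide linting
      // added above; all supported files are scanned, not just the ones currently open.
      "oxc.lint.workspaceMode": true
    }

The client reads `lint.workspaceMode` in WorkspaceConfig and forwards it as `workspaceMode` via toLanguageServerConfig(), so toggling the setting takes effect on the next configuration change or server restart.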