diff --git a/.cargo/config.toml b/.cargo/config.toml
index 7e4e7a0f90..eb89fa1e55 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -1,8 +1,8 @@
 [alias]
 xtask = "run --package xtask --"
 
-# @fb-only
-# @fb-only
+# @fb-only: [build]
+# @fb-only: target-dir = "../../../buck-out/elp"
 
 [profile.release]
 codegen-units = 1
diff --git a/.github/actions/build-edb/action.yml b/.github/actions/build-edb/action.yml
new file mode 100644
index 0000000000..96f6046a8a
--- /dev/null
+++ b/.github/actions/build-edb/action.yml
@@ -0,0 +1,28 @@
+name: 'Build EDB'
+description: 'Build the EDB debugger from source'
+inputs:
+  os:
+    required: true
+  target:
+    required: true
+  otp-version:
+    required: true
+
+runs:
+  using: composite
+  steps:
+    - name: Checkout EDB
+      uses: "actions/checkout@v4"
+      with:
+        repository: WhatsApp/edb
+        path: edb
+        ref: otp-28.0
+    - name: Build EDB
+      run: rebar3 escriptize
+      shell: bash
+      working-directory: edb
+    - name: Upload EDB binary
+      uses: "actions/upload-artifact@v4"
+      with:
+        name: edb
+        path: edb/_build/default/bin/edb
diff --git a/.github/actions/setup-erlang/action.yml b/.github/actions/setup-erlang/action.yml
new file mode 100644
index 0000000000..5016eff3ec
--- /dev/null
+++ b/.github/actions/setup-erlang/action.yml
@@ -0,0 +1,61 @@
+name: 'Setup Erlang/OTP'
+description: 'Setup Erlang/OTP + rebar3'
+inputs:
+  os:
+    required: true
+  otp-version:
+    required: true
+  brew-otp-version:
+    required: true
+
+runs:
+  using: composite
+  steps:
+    - name: Install Erlang/OTP (Linux, Windows)
+      if: inputs.os == 'linux' || inputs.os == 'windows'
+      uses: erlef/setup-beam@v1
+      with:
+        otp-version: ${{ inputs.otp-version }}
+        install-rebar: false
+        install-hex: false
+    - name: Install Erlang/OTP (MacOS Only)
+      if: inputs.os == 'macos'
+      run: brew install erlang@${{ inputs.brew-otp-version }}
+      shell: bash
+    - name: Add erl to path (MacOS Only)
+      if: inputs.os == 'macos'
+      run: |
+        echo '/opt/homebrew/opt/erlang@${{ inputs.brew-otp-version }}/bin' >> $GITHUB_PATH
+        echo '/usr/local/opt/erlang@${{ inputs.brew-otp-version }}/bin' >> $GITHUB_PATH
+      shell: bash
+    - name: Verify Erlang version
+      run: erl -eval 'erlang:display(erlang:system_info(otp_release)), halt().' -noshell
+      shell: bash
+    - name: Install rebar3
+      run: "mkdir rebar3 && curl https://s3.amazonaws.com/rebar3/rebar3 -o rebar3/rebar3 && chmod +x rebar3/rebar3"
+      shell: bash
+    - name: Create rebar3.cmd (Windows Only)
+      if: inputs.os == 'windows'
+      working-directory: rebar3
+      run: |
+        echo '@echo off' > rebar3.cmd
+        echo 'setlocal' >> rebar3.cmd
+        echo 'set rebarscript=%~f0' >> rebar3.cmd
+        echo 'escript.exe "%rebarscript:.cmd=%" %*' >> rebar3.cmd
+      shell: pwsh
+    - name: Add rebar3 to path (No Windows)
+      if: inputs.os != 'windows'
+      run: 'echo "$GITHUB_WORKSPACE/rebar3" >> $GITHUB_PATH'
+      shell: bash
+    - name: Add rebar3 to path (Windows Only)
+      if: inputs.os == 'windows'
+      run: '"$env:GITHUB_WORKSPACE\rebar3" | Out-File -FilePath "$env:GITHUB_PATH" -Append'
+      shell: pwsh
+    - name: Verify rebar3 version (No Windows)
+      if: inputs.os != 'windows'
+      run: rebar3 version
+      shell: bash
+    - name: Verify rebar3 version (Windows Only)
+      if: inputs.os == 'windows'
+      run: rebar3.cmd version
+      shell: cmd
diff --git a/.github/workflows/build-website.yml b/.github/workflows/build-website.yml
index 62072bac4f..54d9750971 100644
--- a/.github/workflows/build-website.yml
+++ b/.github/workflows/build-website.yml
@@ -24,4 +24,4 @@ jobs:
       - name: Install dependencies
         run: yarn install --frozen-lockfile
       - name: Build website
-        run: yarn build
+        run: yarn build-oss
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d61805218c..e9980283f2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,29 +1,50 @@
 name: ELP CI
 on:
   push: {}
+  pull_request:
+    branches:
+      - main
   release:
     types: [published]
-env:
-  EQWALIZER_DIR: ${{ github.workspace }}/eqwalizer/eqwalizer
-  ELP_EQWALIZER_PATH: ${{ github.workspace }}/eqwalizer/eqwalizer/eqwalizer
 jobs:
+  edb:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout erlang-language-platform
+        uses: "actions/checkout@v3"
+      - id: setup-erlang
+        uses: ./.github/actions/setup-erlang
+        with:
+          os: linux
+          otp-version: 27.3
+          brew-otp-version: 27
+      - id: build-edb
+        uses: ./.github/actions/build-edb
+        with:
+          os: linux
+          target: x86_64-unknown-linux-gnu
+          otp-version: 27.3
   ci:
     strategy:
       fail-fast: false
      matrix:
-        platform-arch: [ubuntu-22.04-x64, ubuntu-22.04-arm, macos-13-x64, macos-latest-arm]
+        platform-arch: [ubuntu-22.04-x64, ubuntu-22.04-arm, macos-15-x64, macos-latest-arm, windows-2022-x64]
         otp-version: [26.2, 27.3, 28.0]
         include:
           - otp-version: 26.2
             brew-otp-version: 26
             vscode-publish: true
+            choco-otp-version: 26.2.5.13
           - otp-version: 27.3
             brew-otp-version: 27
             vscode-publish: false
+            choco-otp-version: 27.3.4
           - otp-version: 28.0
             brew-otp-version: 28
             vscode-publish: false
+            choco-otp-version: 28.0.1
           - platform-arch: ubuntu-22.04-x64
             platform: ubuntu-22.04
             os: linux
@@ -34,8 +55,8 @@ jobs:
             os: linux
             target: aarch64-unknown-linux-gnu
             vscode-target: linux-arm64
-          - platform-arch: macos-13-x64
-            platform: macos-13
+          - platform-arch: macos-15-x64
+            platform: macos-15-intel
             os: macos
             target: x86_64-apple-darwin
             vscode-target: darwin-x64
@@ -44,16 +65,18 @@ jobs:
             os: macos
             target: aarch64-apple-darwin
             vscode-target: darwin-arm64
+          - platform-arch: windows-2022-x64
+            platform: windows-2022
+            os: windows
+            target: x86_64-pc-windows-msvc
+            vscode-target: win32-x64
     runs-on: ${{ matrix.platform }}
+    needs: edb
     steps:
      - name: Checkout erlang-language-platform
        uses: "actions/checkout@v3"
-      - name: Checkout eqwalizer
-        uses: "actions/checkout@v3"
        with:
-          repository: WhatsApp/eqwalizer
-          path: eqwalizer
-          ref: main
+          submodules: true
      - name: Set up GraalVM
        uses: graalvm/setup-graalvm@v1
        with:
@@ -68,62 +91,95 @@ jobs:
        uses: dtolnay/rust-toolchain@stable
        with:
          target: ${{ matrix.target }}
+          components: rustfmt
      - name: Set up cross-compiler
        if: matrix.platform-arch == 'ubuntu-22.04-arm'
        run: |
          sudo apt-get update
          sudo apt-get install -y crossbuild-essential-arm64
-      - name: Install Erlang/OTP (Linux Only)
-        if: matrix.os == 'linux'
-        uses: erlef/setup-beam@v1
+      - name: Install Buck2
+        uses: dtolnay/install-buck2@latest
+      - id: setup-erlang
+        uses: ./.github/actions/setup-erlang
        with:
+          os: ${{ matrix.os }}
          otp-version: ${{ matrix.otp-version }}
-          install-rebar: false
-          install-hex: false
-      - name: Install Erlang/OTP (MacOS Only)
-        if: matrix.os == 'macos'
-        run: brew install erlang@${{ matrix.brew-otp-version }}
-      - name: Add erl to path (MacOS Only)
-        if: matrix.os == 'macos'
-        run: |
-          echo '/opt/homebrew/opt/erlang@${{ matrix.brew-otp-version }}/bin' >> $GITHUB_PATH
-          echo '/usr/local/opt/erlang@${{ matrix.brew-otp-version }}/bin' >> $GITHUB_PATH
-      - name: Verify Erlang version
-        run: erl -eval 'erlang:display(erlang:system_info(otp_release)), halt().' -noshell
-      - name: Install rebar3
-        run: "mkdir rebar3 && curl https://s3.amazonaws.com/rebar3/rebar3 -o rebar3/rebar3 && chmod +x rebar3/rebar3"
-      - name: Add rebar3 to path
-        run: 'echo "$GITHUB_WORKSPACE/rebar3" >> $GITHUB_PATH'
-      - name: Verify rebar3 version
-        run: rebar3 version
-      - name: Assemble eqwalizer.jar
+          brew-otp-version: ${{ matrix.brew-otp-version }}
+      - name: Assemble eqwalizer.jar (No Windows)
+        if: matrix.os != 'windows'
        working-directory: eqwalizer/eqwalizer
        run: "sbt assembly"
-      - name: Assemble eqwalizer binary
+      - name: Assemble eqwalizer.jar (Windows Only)
+        if: matrix.os == 'windows'
+        working-directory: eqwalizer\eqwalizer
+        run: "sbt assembly"
+        shell: bash
+      - name: Assemble eqwalizer binary (No Windows)
+        if: matrix.os != 'windows'
        working-directory: eqwalizer/eqwalizer
        run: 'native-image -H:IncludeResources=application.conf --no-server --no-fallback -jar target/scala-3.6.4/eqwalizer.jar eqwalizer'
+      - name: Assemble eqwalizer binary (Windows Only)
+        if: matrix.os == 'windows'
+        working-directory: eqwalizer\eqwalizer
+        run: 'native-image -H:IncludeResources=application.conf --no-server --no-fallback -jar target\scala-3.6.4\eqwalizer.jar eqwalizer'
      - name: Ensure elp is formatted
        run: 'cargo fmt -- --check'
+      - name: Configure Environment (No Windows)
+        if: matrix.os != 'windows'
+        run: |
+          echo "EQWALIZER_DIR=${{ github.workspace }}/eqwalizer/eqwalizer" >> $GITHUB_ENV
+          echo "ELP_EQWALIZER_PATH=${{ github.workspace }}/eqwalizer/eqwalizer/eqwalizer" >> $GITHUB_ENV
+      - name: Configure Environment (Windows Only)
+        if: matrix.os == 'windows'
+        run: |
+          echo "EQWALIZER_DIR=${{ github.workspace }}\eqwalizer\eqwalizer" >> $env:GITHUB_ENV
+          echo "ELP_EQWALIZER_PATH=${{ github.workspace }}\eqwalizer\eqwalizer\eqwalizer.exe" >> $env:GITHUB_ENV
      - name: Test elp
-        # Do not run the tests in case of cross-compilation
-        if: matrix.platform-arch != 'macos-latest-arm'
-        run: 'cargo test --no-default-features --workspace --target ${{ matrix.target }}'
-      - name: Build elp
+        # Do not run the tests in case of cross-compilation or on Windows
+        if: matrix.platform-arch != 'macos-latest-arm' && matrix.os != 'windows'
+        run: 'cargo test --workspace --target ${{ matrix.target }}'
+      - name: Build elp (No Windows)
+        if: matrix.os != 'windows'
        run: 'cargo build --release --target ${{ matrix.target }} --config target.aarch64-unknown-linux-gnu.linker=\"aarch64-linux-gnu-gcc\"'
-      - name: Add elp to path
+      - name: Build elp (Windows Only)
+        if: matrix.os == 'windows'
+        run: 'cargo build --release --target ${{ matrix.target }}'
+      - name: Add elp to path (No Windows)
+        if: matrix.os != 'windows'
        run: 'echo "$GITHUB_WORKSPACE/target/${{ matrix.target}}/release" >> $GITHUB_PATH'
-      - name: Upload elp binary
+      - name: Add elp to path (Windows Only)
+        if: matrix.os == 'windows'
+        run: '"$env:GITHUB_WORKSPACE\target\${{ matrix.target }}\release" | Out-File -FilePath "$env:GITHUB_PATH" -Append'
+      - name: Upload elp binary (No Windows)
+        if: matrix.os != 'windows'
        uses: "actions/upload-artifact@v4"
        with:
          name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
-          path: target/${{ matrix.target}}/release/elp
-      - name: Upload eqwalizer native binary
+          path: target/${{ matrix.target }}/release/elp
+      - name: Upload elp binary (Windows Only)
+        if: matrix.os == 'windows'
+        uses: "actions/upload-artifact@v4"
+        with:
+          name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
+          path: target\${{ matrix.target }}\release\elp.exe
+      - name: Upload eqwalizer native binary (No Windows)
+        if: matrix.os != 'windows'
        uses: "actions/upload-artifact@v4"
        with:
          name: eqwalizer-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
          path: ./eqwalizer/eqwalizer/eqwalizer
-      - name: Make elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz
+      - name: Upload eqwalizer native binary (Windows Only)
+        if: matrix.os == 'windows'
+        uses: "actions/upload-artifact@v4"
+        with:
+          name: eqwalizer-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
+          path: .\eqwalizer\eqwalizer\eqwalizer.exe
+      - name: Make elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz (No Windows)
+        if: matrix.os != 'windows'
        run: 'tar -zcvf elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz -C target/${{ matrix.target}}/release/ elp'
+      - name: Make elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz (Windows Only)
+        if: matrix.os == 'windows'
+        run: 'tar -zcvf elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz -C target\${{ matrix.target}}\release elp.exe'
      - env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
        id: get_release_url
@@ -143,15 +199,47 @@ jobs:
      - name: Setup Node
        uses: actions/setup-node@v3
        with:
-          node-version: 18
+          node-version: 20
      - name: Install VSCE
        run: npm install -g vsce
-      - name: Prepare VS Code Extension to host binaries
+      - name: Install OVSX
+        run: npm install -g ovsx
+      - name: Prepare VS Code Extension to host binaries (No Windows)
+        if: matrix.os != 'windows'
        run: mkdir -p editors/code/bin
-      - name: Package eqWAlizer binary into VS Code Extension
+      - name: Prepare VS Code Extension to host binaries (Windows Only)
+        if: matrix.os == 'windows'
+        run: if not exist "editors\code\bin" mkdir "editors\code\bin"
+        shell: cmd
+      - name: Fetch EDB escript
+        uses: actions/download-artifact@v4
+        with:
+          name: edb
+      - name: Ensure escript is executable
+        run: chmod +x edb
+        shell: bash
+      - name: Verify EDB escript
+        run: ./edb -h || true
+        shell: bash
+      - name: Package EDB escript into VS Code Extension (No Windows)
+        if: matrix.os != 'windows'
+        run: cp edb editors/code/bin
+      - name: Package EDB escript into VS Code Extension (Windows Only)
+        if: matrix.os == 'windows'
+        run: cp edb editors\code\bin
+      - name: Package eqWAlizer binary into VS Code Extension (No Windows)
+        if: matrix.os != 'windows'
        run: cp eqwalizer/eqwalizer/eqwalizer editors/code/bin
-      - name: Package ELP binary into VS Code Extension
+      - name: Package eqWAlizer binary into VS Code Extension (Windows Only)
+        if: matrix.os == 'windows'
+        run: cp eqwalizer\eqwalizer\eqwalizer.exe editors\code\bin
+      - name: Package ELP binary into VS Code Extension (No Windows)
+        if: matrix.os != 'windows'
        run: cp target/${{ matrix.target}}/release/elp editors/code/bin
+      - name: Package ELP binary into VS Code Extension (Windows Only)
+        if: matrix.os == 'windows'
+        run: cp target\${{ matrix.target}}\release\elp.exe editors\code\bin
      - name: Ensure binaries are executable
        run: chmod +x editors/code/bin/*
+        shell: bash
      - name: npm install
@@ -166,22 +254,43 @@ jobs:
      - name: Rename Package
        working-directory: editors/code
        run: mv erlang-language-platform-*.vsix erlang-language-platform.vsix
-      - name: Upload Extension
+      - name: Upload Extension (No Windows)
+        if: matrix.os != 'windows'
        uses: "actions/upload-artifact@v4"
        with:
          name: elp-${{ matrix.os}}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
          path: editors/code/erlang-language-platform.vsix
+      - name: Upload Extension (Windows Only)
+        if: matrix.os == 'windows'
+        uses: "actions/upload-artifact@v4"
+        with:
+          name: elp-${{ matrix.os}}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
+          path: editors\code\erlang-language-platform.vsix
      - env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-        name: Upload Extension Package
-        if: ${{ github.event_name == 'release' }}
+        name: Upload Extension Package (No Windows)
+        if: ${{ github.event_name == 'release' && matrix.os != 'windows' }}
        uses: "actions/upload-release-asset@v1.0.2"
        with:
          asset_content_type: application/octet-stream
          asset_name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
          asset_path: editors/code/erlang-language-platform.vsix
          upload_url: "${{ steps.get_release_url.outputs.upload_url }}"
+      - env:
+          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+        name: Upload Extension Package (Windows Only)
+        if: ${{ github.event_name == 'release' && matrix.os == 'windows' }}
+        uses: "actions/upload-release-asset@v1.0.2"
+        with:
+          asset_content_type: application/octet-stream
+          asset_name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
+          asset_path: editors\code\erlang-language-platform.vsix
+          upload_url: "${{ steps.get_release_url.outputs.upload_url }}"
      - name: Publish extension to marketplace
        working-directory: editors/code
-        if: ${{ github.event_name == 'release' && matrix.vscode-publish }}
+        if: ${{ github.event_name == 'release' && matrix.vscode-publish && matrix.os != 'windows' }}
        run: vsce publish -p ${{ secrets.VSCE_PAT }} --packagePath erlang-language-platform.vsix
+      - name: Publish extension to OpenVSX marketplace
+        working-directory: editors/code
+        if: ${{ github.event_name == 'release' && matrix.vscode-publish && matrix.os != 'windows' }}
+        run: ovsx publish -p ${{ secrets.OVSX_PAT }} --packagePath erlang-language-platform.vsix
diff --git a/.github/workflows/deploy-website.yml b/.github/workflows/deploy-website.yml
index 679cd80c89..97d9454702 100644
--- a/.github/workflows/deploy-website.yml
+++ b/.github/workflows/deploy-website.yml
@@ -24,7 +24,7 @@ jobs:
      - name: Install dependencies
        run: yarn install --frozen-lockfile
      - name: Build website
-        run: yarn build
+        run: yarn build-oss
 
      # Popular action to deploy to GitHub Pages:
      # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000..1445b3a80b
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "eqwalizer"]
+	path = eqwalizer
+	url = https://github.com/WhatsApp/eqwalizer
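With eqwalizer now vendored as a git submodule (and CI checking out with `submodules: true`), a fresh local checkout needs the submodule initialised. A sketch using standard git commands:

```bash
# Clone including the eqwalizer submodule
git clone --recurse-submodules https://github.com/WhatsApp/erlang-language-platform.git

# Or, in an existing checkout
git submodule update --init
```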
"eqwalizer"] + path = eqwalizer + url = https://github.com/WhatsApp/eqwalizer diff --git a/.llms/rules/elp_development.md b/.llms/rules/elp_development.md index c2e52eec1a..efd132ac4b 100644 --- a/.llms/rules/elp_development.md +++ b/.llms/rules/elp_development.md @@ -1,20 +1,50 @@ -# ELP Development Rules for LLMs +--- +llms-gk: 'devmate_elp_development_md' +apply_to_regex: '^(.*\.rs|.*\.md)$' +oncalls: ['vscode_erlang'] +--- +# ELP Development Rules for LLMs (OSS) ## Project Overview -ELP (Erlang Language Platform) is a language server and development tools suite for Erlang, built in Rust. This project provides IDE features, diagnostics, and code analysis for Erlang codebases. + +ELP (Erlang Language Platform) is a language server and development tools suite +for Erlang, built in Rust. This project provides IDE features, diagnostics, and +code analysis for Erlang codebases. + +## Build System + +Use standard Cargo commands: + +```bash +# Build +cargo build --release + +# Run tests +cargo test --workspace + +# Run clippy +cargo clippy --tests + +# Format code +cargo fmt + +# Code generation +cargo xtask codegen +``` ## Diagnostic Code Management ### Adding New Diagnostic Codes + When adding new diagnostic codes to `DiagnosticCode` enum: -1. **Naming Convention**: Use descriptive PascalCase names that clearly indicate the issue +1. **Naming Convention**: Use descriptive PascalCase names that clearly indicate + the issue - Good: `UnusedFunctionArg`, `MissingCompileWarnMissingSpec` - Bad: `Error1`, `BadCode` 2. **Code Assignment**: Follow the established numbering scheme - `W0000-W9999`: Native ELP diagnostics, visible in the OSS version - - `WA000-WA999`: WhatsApp-specific warnings, only visible in Meta builds - Use the next available number in the appropriate range - Never change the number of an existing diagnostic code - Never change the label of an existing diagnostic code @@ -28,7 +58,8 @@ When adding new diagnostic codes to `DiagnosticCode` enum: 4. **Documentation**: Add comments explaining complex diagnostic codes -5. **Documentation File**: Create a corresponding documentation file in the website +5. **Documentation File**: Create a corresponding documentation file in the + website - Location: `website/docs/erlang-error-index/{namespace}/{code}.md` - Example: `W0051` → `website/docs/erlang-error-index/w/W0051.md` - Include frontmatter with `sidebar_position` matching the code number @@ -41,16 +72,20 @@ When adding new diagnostic codes to `DiagnosticCode` enum: - The `as_uri()` method automatically generates URLs pointing to these docs ### Creating DiagnosticDescriptor -Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines when and how the diagnostic runs: -1. **Static Descriptor Declaration**: Create a public static descriptor in your diagnostic module +Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines +when and how the diagnostic runs: + +1. **Static Descriptor Declaration**: Create a public static descriptor in your + diagnostic module - Use `pub(crate) static DESCRIPTOR: DiagnosticDescriptor` pattern - Define `DiagnosticConditions` with appropriate flags - Provide a checker function that implements the diagnostic logic 2. 
**Diagnostic Conditions**: Configure when the diagnostic should run - `experimental`: Mark as true for experimental/unstable diagnostics - - `include_generated`: Set to false if diagnostic shouldn't run on generated code + - `include_generated`: Set to false if diagnostic shouldn't run on generated + code - `include_tests`: Set to false if diagnostic shouldn't run on test files - `default_disabled`: Set to true if diagnostic requires explicit enabling @@ -59,7 +94,8 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w - Push diagnostics to the `diags` vector using `Diagnostic::new()` - Use helper functions to keep the checker clean and focused -4. **Registration**: Add the descriptor to `diagnostics_descriptors()` function in `diagnostics.rs` +4. **Registration**: Add the descriptor to `diagnostics_descriptors()` function + in `diagnostics.rs` - Include your module's `DESCRIPTOR` in the returned vector 5. **Module Structure**: Follow the established pattern @@ -68,29 +104,28 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w - Include comprehensive tests with `#[cfg(test)]` - Use SSR patterns when appropriate for complex matching -### Meta-Only vs OSS Code -- Use `@fb-only` and `@oss-only` comments to mark platform-specific code -- Meta-only diagnostics should use `MetaOnlyDiagnosticCode` wrapper -- Ensure OSS builds work by providing fallbacks for Meta-only features - ## Rust Code Style ### Error Handling + - Use `Result` for fallible operations - Prefer `?` operator over explicit match for error propagation - Use descriptive error messages with context ### Pattern Matching + - Use exhaustive matches for enums to catch new variants at compile time - Add explicit comments when intentionally using catch-all patterns - Prefer early returns to reduce nesting ### String Handling + - Use `&str` for borrowed strings, `String` for owned - Use `format!()` for complex string formatting - Use `to_string()` for simple conversions ### Collections + - Use `FxHashMap` instead of `std::HashMap` for better performance - Use `lazy_static!` for expensive static computations - Prefer iterators over manual loops where possible @@ -98,28 +133,116 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w ## Testing Guidelines ### Test Structure + - Use `expect_test` for snapshot testing of complex outputs - Group related tests in the same module - Use descriptive test names that explain the scenario +### Declarative Test Fixtures + +ELP uses a declarative test fixture system that allows you to write tests with +inline annotations and markers directly in test strings. This system is defined +in `crates/project_model/src/test_fixture.rs`. + +#### Key Features + +1. **File Organization**: Use `//- /path/to/file.erl` to define multiple files + in a single test +2. **Metadata Markers**: Specify app names, include paths, OTP apps, etc. using + metadata after the path +3. **Annotations**: Mark expected diagnostics or ranges using `%% ^^^` syntax +4. 
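+
+A minimal sketch combining these markers (the app and module names, and the
+`app:` metadata syntax, are hypothetical; `~` marks a cursor position):
+
+```rust
+let fixture = r#"
+//- /my_app/src/a_module.erl app:my_app
+-module(a_module).
+f() -> ~ok.
+"#;
+```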
+
+#### Annotation Syntax
+
+Annotations allow you to mark expected diagnostics, types, or other information
+directly in test code:
+
+- **Basic annotation**: `%% ^^^ some text` - Points to the range above matching
+  the caret length
+- **Top-of-file marker**: `%% <<< text` (at file start) - Creates annotation at
+  position 0..0
+- **File-wide annotation**: `%% ^^^file text` - Annotation spans the entire file
+  contents
+- **Left-margin annotation**: `%%<^^^ text` - Annotation starts at `%%` position
+  instead of first `^`
+- **Multiline annotations**: Use continuation lines with `%% | next line`
+  - Continuation lines are particularly useful for diagnostics with related information:
+  ```erlang
+  foo() -> syntax error oops.
+  %% ^^^^^ error: P1711: syntax error before: error
+  %% | Related info: 0:45-50 function foo/0 undefined
+  ```
+
+#### Example Test Fixture
+
+```rust
+let fixture = r#"
+//- /src/main.erl
+-module(main).
+
+foo( -> ok. %%
+%% ^ error: W0004: Missing ')'~
+"#;
+```
+
 ### Test Data
+
 - Create minimal test cases that focus on specific functionality
 - Use realistic Erlang code examples in tests
 - Test both positive and negative cases
 
-### Existing tests
-- Do not change existing tests without asking
+### Running Tests for Specific Crates
+
+When running tests for a specific crate, you need to specify the crate name, not
+the directory name. The mapping is:
+
+| Crate Name           | Directory Name          |
+| -------------------- | ----------------------- |
+| `elp`                | `crates/elp`            |
+| `elp_base_db`        | `crates/base_db`        |
+| `elp_eqwalizer`      | `crates/eqwalizer`      |
+| `elp_erlang_service` | `crates/erlang_service` |
+| `elp_ide`            | `crates/ide`            |
+| `elp_ide_assists`    | `crates/ide_assists`    |
+| `elp_ide_completion` | `crates/ide_completion` |
+| `elp_ide_db`         | `crates/ide_db`         |
+| `elp_ide_ssr`        | `crates/ide_ssr`        |
+| `elp_log`            | `crates/elp_log`        |
+| `elp_project_model`  | `crates/project_model`  |
+| `elp_syntax`         | `crates/syntax`         |
+| `elp_text_edit`      | `crates/text_edit`      |
+| `elp_types_db`       | `crates/types_db`       |
+| `hir`                | `crates/hir`            |
+
+Example: To run tests for the `elp_ide` crate:
+
+```bash
+cargo test -p elp_ide
+```
+
+Or to run tests in a specific directory:
+
+```bash
+cargo test --manifest-path crates/ide/Cargo.toml
+```
+
+### Existing tests
+
+- Do not change existing tests without asking
 
 ## Documentation
 
 ### Code Comments
+
 - Document complex algorithms and business logic
 - Explain WHY, not just WHAT the code does
 - Use `///` for public API documentation
 - Use `//` for internal implementation notes
 
 ### Error Messages
+
 - Make error messages actionable and user-friendly
 - Include context about what was expected vs. what was found
 - Provide suggestions for fixing the issue when possible
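A hedged illustration of these three guidelines (hypothetical helper, not part of this diff):

```rust
/// Sketch: an actionable diagnostic message that states what was expected,
/// what was found, and a suggested fix.
fn arity_mismatch_message(expected: usize, found: usize) -> String {
    format!(
        "spec has {expected} argument(s) but the function defines {found}; \
         consider updating the spec to match the function definition"
    )
}
```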
@@ -127,11 +250,13 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w
 ## Performance Considerations
 
 ### Memory Usage
+
 - Use `Box` for large enum variants to keep enum size small
 - Consider using `Cow` for strings that might be borrowed or owned
 - Use `Arc` for shared immutable data
 
 ### Computation
+
 - Cache expensive computations using `lazy_static!` or `once_cell`
 - Use appropriate data structures (HashMap for lookups, Vec for sequences)
 - Profile code paths that handle large Erlang codebases
@@ -139,11 +264,13 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w
 ## Integration Guidelines
 
 ### Erlang Service Integration
+
 - Handle Erlang service errors gracefully
 - Use appropriate namespaces for different error sources
 - Maintain backward compatibility with existing error codes
 
 ### IDE Integration
+
 - Provide rich diagnostic information (ranges, severity, fixes)
 - Support quick fixes and code actions where appropriate
 - Ensure diagnostics are fast enough for real-time feedback
@@ -151,16 +278,19 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w
 ## Maintenance
 
 ### Backward Compatibility
+
 - Don't change existing diagnostic codes or their meanings
 - Deprecate old codes before removing them
 - Maintain serialization compatibility for configuration files
 
 ### Code Organization
+
 - Keep related functionality together in modules
 - Use clear module boundaries and public APIs
 - Minimize dependencies between modules
 
 ### Version Management
+
 - Follow semantic versioning for public APIs
 - Document breaking changes in release notes
 - Provide migration guides for major changes
@@ -168,27 +298,25 @@ Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines w
 ## Common Patterns
 
 ### Regex Usage
+
 - Use `lazy_static!` for compiled regexes
 - Prefer specific patterns over overly broad ones
 - Test regex patterns thoroughly with edge cases
 
 ### Configuration
+
 - Support both code-based and label-based diagnostic references
 - Use serde for serialization/deserialization
 - Provide sensible defaults for all configuration options
 
 ### Error Recovery
+
 - Continue processing after encountering errors when possible
 - Collect multiple errors rather than failing on the first one
 - Provide partial results when full analysis isn't possible
 
-### Tools
-
-- ELP uses a cargo workspace.
-- Inside Meta, use `./meta/cargo.sh` instead of `cargo`
-- Inside Meta, use `./meta/clippy.sh` to run clippy
-- Use `arc lint --apply-patches` for formatting.
-
 ### Process
-- Always run tests before finishing.
-- Always run `./meta/cargo.sh clippy --tests` before submitting a diff
+
+- Always run tests before finishing
+- Always run `cargo clippy --tests` before submitting PRs
+- Use `cargo fmt` for code formatting
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100644
index 0000000000..7572a84e98
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,89 @@
+{
+  "version": "2.0.0",
+  "tasks": [
+    {
+      "label": "ELP: build (debug)",
+      "type": "shell",
+      // @fb-only: "command": "./meta/cargo.sh build",
+      "command": "cargo build", // @oss-only
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "reveal": "always",
+        "panel": "new"
+      }
+    },
+    {
+      "label": "ELP: build (release)",
+      "type": "shell",
+      // @fb-only: "command": "./meta/cargo.sh build --release",
+      "command": "cargo build --release", // @oss-only
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "reveal": "always",
+        "panel": "new"
+      }
+    },
+    {
+      "label": "ELP: build (release-thin)",
+      "type": "shell",
+      // @fb-only: "command": "./meta/cargo.sh build --profile release-thin --bins",
+      "command": "cargo build --profile release-thin --bins", // @oss-only
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "reveal": "always",
+        "panel": "new"
+      }
+    },
+    {
+      "label": "ELP: run clippy on workspace",
+      "type": "shell",
+      // @fb-only: "command": "./meta/clippy.sh --workspace --tests",
+      "command": "cargo clippy --workspace --tests", // @oss-only
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "reveal": "always",
+        "panel": "new"
+      }
+    },
+    {
+      "label": "ELP: run clippy on workspace, apply fixes",
+      "type": "shell",
+      // @fb-only: "command": "./meta/clippy.sh --workspace --tests --fix",
+      "command": "cargo clippy --workspace --tests --fix", // @oss-only
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "reveal": "always",
+        "panel": "new"
+      }
+    },
+    {
+      "label": "ELP: run tests on workspace",
+      "type": "shell",
+      // @fb-only: "command": "./meta/cargo.sh test --workspace",
+      "command": "cargo test --workspace", // @oss-only
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "reveal": "always",
+        "panel": "new"
+      }
+    }
+  ]
+}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f9ab793705..f84552a442 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -20,7 +20,16 @@ We actively welcome your pull requests.
 
 1. Fork the repo and create your branch from `main`.
 2. If you've added code that should be tested, add tests.
-3. Ensure the test suite passes.
+3. Ensure the test suite passes and that the code is formatted correctly (`cargo fmt -- --check`), e.g. using the commands below.
 4. If you haven't already, complete the Contributor License Agreement ("CLA").
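+
+   A typical local check sequence before opening a PR (a sketch based on the
+   commands used elsewhere in this repo):
+
+   ```bash
+   cargo fmt -- --check    # formatting
+   cargo clippy --tests    # lints
+   cargo test --workspace  # test suite
+   ```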
 
 ## Contributor License Agreement ("CLA")
diff --git a/Cargo.lock b/Cargo.lock
index a03c78f3cc..2da9907eca 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -446,10 +446,10 @@ dependencies = [
 "crossbeam-channel",
 "elp_eqwalizer",
 "elp_ide",
+ "elp_ide_db",
 "elp_log",
 "elp_project_model",
 "elp_syntax",
- "elp_text_edit",
 "env_logger",
 "expect-test",
 "fs_extra",
@@ -572,7 +572,6 @@ dependencies = [
 "elp_ide_ssr",
 "elp_project_model",
 "elp_syntax",
- "elp_text_edit",
 "elp_types_db",
 "env_logger",
 "expect-test",
@@ -604,7 +603,6 @@ dependencies = [
 "cov-mark",
 "elp_ide_db",
 "elp_syntax",
- "elp_text_edit",
 "expect-test",
 "fxhash",
 "hir",
@@ -637,6 +635,7 @@ name = "elp_ide_db"
 version = "1.1.0"
 dependencies = [
 "anyhow",
+ "cov-mark",
 "eetf",
 "either",
 "elp_base_db",
@@ -644,12 +643,12 @@ dependencies = [
 "elp_erlang_service",
 "elp_project_model",
 "elp_syntax",
- "elp_text_edit",
 "elp_types_db",
 "expect-test",
 "fxhash",
 "hir",
 "indexmap 2.9.0",
+ "itertools 0.10.5",
 "lazy_static",
 "log",
 "memchr",
@@ -664,6 +663,7 @@ dependencies = [
 "strum",
 "strum_macros",
 "tempfile",
+ "text-size",
 "toml",
 "tracing",
 ]
@@ -734,10 +734,8 @@ dependencies = [
 name = "elp_syntax"
 version = "1.1.0"
 dependencies = [
- "cov-mark",
 "eetf",
 "elp_ide_db",
- "elp_text_edit",
 "expect-test",
 "fxhash",
 "indexmap 2.9.0",
@@ -757,14 +755,6 @@ dependencies = [
 "tree-sitter-erlang",
 ]
 
-[[package]]
-name = "elp_text_edit"
-version = "1.1.0"
-dependencies = [
- "itertools 0.10.5",
- "text-size",
-]
-
 [[package]]
 name = "elp_types_db"
 version = "1.1.0"
@@ -2534,10 +2524,11 @@ dependencies = [
 
 [[package]]
 name = "tree-sitter-erlang"
-version = "0.14.0"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2091cce4eda19c03d77928c608ac6617445a6a25691dde1e93ac0102467a6be"
 dependencies = [
 "cc",
- "tree-sitter",
 "tree-sitter-language",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index 7ec49a5f83..825530fe4c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -30,13 +30,9 @@ elp_ide_ssr = { path = "./crates/ide_ssr" }
 elp_log = { path = "./crates/elp_log" }
 elp_project_model = { path = "./crates/project_model" }
 elp_syntax = { path = "./crates/syntax" }
-elp_text_edit = { path = "./crates/text_edit" }
 elp_types_db = { path = "./crates/types_db" }
 hir = { path = "./crates/hir" }
 
-# Forks
-erl_ast = { path = "./crates/erl_ast" }
-
 # External crates
 trie-rs = "0.4.2"
 always-assert = "0.1.3"
@@ -112,8 +108,9 @@ threadpool = "1.8.1"
 timeout-readwrite = "0.3.3"
 toml = "0.5"
 tree-sitter = "0.23.2"
-# @fb-only
-tree-sitter-erlang = "0.14.0" # @oss-only
+# When developing the grammar, you may want to point to a local version
+# tree-sitter-erlang = { path = "./tree-sitter-erlang" }
+tree-sitter-erlang = "0.15.0"
 url = "2.5.4"
 ustr = { version = "1.1.0", features = ["serde"] }
 vfs = { git = "https://github.com/rust-lang/rust-analyzer", rev = "2025-03-04" }
diff --git a/bench_runner/example_bench/benches/main.rs b/bench_runner/example_bench/benches/main.rs
deleted file mode 100644
index 6b2733b5b9..0000000000
--- a/bench_runner/example_bench/benches/main.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is dual-licensed under either the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree or the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree. You may select, at your option, one of the
- * above-listed licenses.
- */
-
-use std::thread;
-use std::time;
-
-use criterion::BenchmarkId;
-use criterion::Criterion;
-use criterion::criterion_group;
-use criterion::criterion_main;
-
-fn fibonacci_slow(n: u64) -> u64 {
-    match n {
-        0 => 1,
-        1 => 1,
-        n => fibonacci_slow(n - 1) + fibonacci_slow(n - 2),
-    }
-}
-
-fn fibonacci_fast(n: u64) -> u64 {
-    let mut a = 0;
-    let mut b = 1;
-    let millis = time::Duration::from_millis(12);
-    thread::sleep(millis);
-
-    match n {
-        0 => b,
-        _ => {
-            for _ in 0..n {
-                let c = a + b;
-                a = b;
-                b = c;
-            }
-            b
-        }
-    }
-}
-
-fn bench_fibs(c: &mut Criterion) {
-    let mut group = c.benchmark_group("Fibonacci");
-    for i in [20u64, 21u64].iter() {
-        group.bench_with_input(BenchmarkId::new("Recursive", i), i, |b, i| {
-            b.iter(|| fibonacci_slow(*i))
-        });
-        group.bench_with_input(BenchmarkId::new("Iterative", i), i, |b, i| {
-            b.iter(|| fibonacci_fast(*i))
-        });
-    }
-    group.finish();
-}
-
-criterion_group!(benches, bench_fibs);
-criterion_main!(benches);
diff --git a/bench_runner/runner/main.rs b/bench_runner/runner/main.rs
deleted file mode 100644
index f895c08c52..0000000000
--- a/bench_runner/runner/main.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is dual-licensed under either the MIT license found in the
- * LICENSE-MIT file in the root directory of this source tree or the Apache
- * License, Version 2.0 found in the LICENSE-APACHE file in the root directory
- * of this source tree. You may select, at your option, one of the
- * above-listed licenses.
- */
-
-use std::env;
-
-fn main() {
-    let args: Vec<String> = env::args().collect();
-    println!("ARGS: {:?}", args);
-}
diff --git a/crates/base_db/src/fixture.rs b/crates/base_db/src/fixture.rs
index 7464f4f793..cd1328d14d 100644
--- a/crates/base_db/src/fixture.rs
+++ b/crates/base_db/src/fixture.rs
@@ -87,6 +87,7 @@ pub trait WithFixture: Default + SourceDatabaseExt + 'static {
         let (fixture, change) = ChangeFixture::parse(fixture_str);
         let mut db = Self::default();
         change.apply(&mut db, &|path| fixture.resolve_file_id(path));
+        fixture.validate(&db);
         (db, fixture)
     }
 }
@@ -101,6 +102,7 @@ pub struct ChangeFixture {
     pub diagnostics_enabled: DiagnosticsEnabled,
     pub tags: FxHashMap<FileId, Vec<(TextRange, Option<String>)>>,
     pub annotations: FxHashMap<FileId, Vec<(TextRange, String)>>,
+    pub expect_parse_errors: bool,
 }
 
 struct Builder {
@@ -142,7 +144,7 @@ impl Builder {
     fn absolute_path(&self, path: String) -> String {
         if let Some(project_dir) = &self.project_dir {
             let project_dir_str = project_dir.path().as_os_str().to_str().unwrap();
-            format!("{}/{}", project_dir_str, path)
+            format!("{project_dir_str}/{path}")
         } else {
             path
         }
@@ -172,6 +174,7 @@ impl ChangeFixture {
         let FixtureWithProjectMeta {
             fixture,
             mut diagnostics_enabled,
+            expect_parse_errors,
         } = fixture_with_meta.clone();
 
         let builder = Builder::new(diagnostics_enabled.clone());
@@ -199,10 +202,10 @@ impl ChangeFixture {
 
             let app_name = entry.app_data.name.clone();
 
-            if let Some(otp_extra) = entry.otp {
-                if otp.is_none() {
-                    otp = Some(otp_extra);
-                }
+            if let Some(otp_extra) = entry.otp
+                && otp.is_none()
+            {
+                otp = Some(otp_extra);
             }
 
             app_map.combine(entry.app_data);
@@ -272,7 +275,7 @@ impl ChangeFixture {
                 write!(tmp_file, "{}", &text).unwrap();
             }
 
-            let json_config_file = format!("{}/build_info.json", project_dir_str);
+            let json_config_file = format!("{project_dir_str}/build_info.json");
 
             let mut writer = File::create(&json_config_file).unwrap();
@@ -295,7 +298,7 @@ impl ChangeFixture {
             ProjectManifest::discover(&AbsPathBuf::assert(json_config_file.into())).unwrap();
         let loaded_project = Project::load(
             &manifest,
-            elp_config.eqwalizer,
+            &elp_config,
             &BuckQueryConfig::BuildGeneratedCode,
             &|_| {},
         )
@@ -344,6 +347,7 @@ impl ChangeFixture {
                 diagnostics_enabled,
                 tags,
                 annotations,
+                expect_parse_errors,
             },
             change,
             project,
@@ -405,6 +409,64 @@ impl ChangeFixture {
             .get(&VfsPath::from(path.clone()))
             .cloned()
     }
+
+    /// Validate all files in the fixture for syntax errors.
+    /// Panics with context if any syntax errors are found.
+    /// Skips validation if `expect_parse_errors` is set to true.
+    #[track_caller]
+    pub fn validate<DB: SourceDatabaseExt>(&self, db: &DB) {
+        if self.expect_parse_errors {
+            return;
+        }
+
+        let mut errors_found = Vec::new();
+
+        for file_id in &self.files {
+            let parse = db.parse(*file_id);
+            let errors = parse.errors();
+
+            if !errors.is_empty() {
+                let path = self
+                    .files_by_path
+                    .iter()
+                    .find_map(|(vfs_path, id)| {
+                        if id == file_id {
+                            Some(
+                                vfs_path
+                                    .as_path()
+                                    .map(|p| p.to_string())
+                                    .unwrap_or_else(|| format!("{:?}", vfs_path)),
+                            )
+                        } else {
+                            None
+                        }
+                    })
+                    .unwrap_or_else(|| format!("FileId({:?})", file_id));
+
+                let file_text = SourceDatabaseExt::file_text(db, *file_id);
+                let tree = parse.tree();
+                errors_found.push((path, file_text.to_string(), errors.to_vec(), tree));
+            }
+        }
+
+        if !errors_found.is_empty() {
+            let mut message =
+                String::from("Fixture validation failed: syntax errors found in test fixture\n\n");
+
+            for (path, text, errors, tree) in errors_found {
+                message.push_str(&format!("File: {}\n", path));
+                message.push_str(&format!("Errors: {:?}\n", errors));
+                message.push_str(&format!("Content:\n{}\n", text));
+                message.push_str(&format!("Parse Tree:\n{:#?}\n", tree));
+                message.push_str("---\n");
+            }
+            message.push_str(
+                "If this is expected, add `//- expect_parse_errors` to the start of the fixture\n",
+            );
+
+            panic!("{}", message);
+        }
+    }
 }
 
 fn inc_file_id(file_id: &mut FileId) {
@@ -484,8 +546,8 @@ bar() -> ?FOO.
         app_map: {
             SourceRootId(
                 0,
-            ): (
-                Some(
+            ): AppMapData {
+                app_data: Some(
                     AppData {
                         project_id: ProjectId(
                             0,
@@ -493,6 +555,7 @@ bar() -> ?FOO.
                         name: AppName(
                             "test-fixture",
                         ),
+                        buck_target_name: None,
                         dir: AbsPathBuf(
                             "/",
                         ),
@@ -518,12 +581,13 @@ bar() -> ?FOO.
                         is_test_target: None,
                     },
                 ),
-                None,
-            ),
+                applicable_files: None,
+                gen_src_files: None,
+            },
             SourceRootId(
                 2,
-            ): (
-                Some(
+            ): AppMapData {
+                app_data: Some(
                     AppData {
                         project_id: ProjectId(
                             1,
@@ -531,6 +595,7 @@ bar() -> ?FOO.
                         name: AppName(
                             "comp",
                         ),
+                        buck_target_name: None,
                         dir: AbsPathBuf(
                             "/opt/lib/comp-1.3",
                         ),
@@ -567,12 +632,13 @@ bar() -> ?FOO.
                         is_test_target: None,
                     },
                 ),
-                None,
-            ),
+                applicable_files: None,
+                gen_src_files: None,
+            },
             SourceRootId(
                 1,
-            ): (
-                Some(
+            ): AppMapData {
+                app_data: Some(
                     AppData {
                         project_id: ProjectId(
                             0,
@@ -580,6 +646,7 @@ bar() -> ?FOO.
                         name: AppName(
                             "foo-app",
                         ),
+                        buck_target_name: None,
                         dir: AbsPathBuf(
                             "/",
                         ),
@@ -605,14 +672,16 @@ bar() -> ?FOO.
                        is_test_target: None,
                    },
                ),
-                None,
-            ),
+                applicable_files: None,
+                gen_src_files: None,
+            },
             SourceRootId(
                 3,
-            ): (
-                None,
-                None,
-            ),
+            ): AppMapData {
+                app_data: None,
+                applicable_files: None,
+                gen_src_files: None,
+            },
         },
         project_map: {
             ProjectId(
                 0,
@@ -664,13 +733,10 @@ bar() -> ?FOO.
                 eqwalizer_config: EqwalizerConfig {
                     enable_all: true,
                     max_tasks: 4,
+                    ignore_modules: [],
+                    ignore_modules_compiled_patterns: [],
                 },
-                include_mapping: Some(
-                    IncludeMapping {
-                        includes: {},
-                        deps: {},
-                    },
-                ),
+                include_mapping: None,
             },
             ProjectId(
                 1,
@@ -702,13 +768,10 @@ bar() -> ?FOO.
                 eqwalizer_config: EqwalizerConfig {
                     enable_all: true,
                     max_tasks: 4,
+                    ignore_modules: [],
+                    ignore_modules_compiled_patterns: [],
                 },
-                include_mapping: Some(
-                    IncludeMapping {
-                        includes: {},
-                        deps: {},
-                    },
-                ),
+                include_mapping: None,
             },
         },
         catch_all_source_root: SourceRootId(
@@ -744,8 +807,8 @@ foo() -> ?BAR.
         app_map: {
             SourceRootId(
                 0,
-            ): (
-                Some(
+            ): AppMapData {
+                app_data: Some(
                     AppData {
                         project_id: ProjectId(
                             0,
@@ -753,6 +816,7 @@ foo() -> ?BAR.
                         name: AppName(
                             "test-fixture",
                         ),
+                        buck_target_name: None,
                         dir: AbsPathBuf(
                             "/extra",
                         ),
@@ -794,14 +858,16 @@ foo() -> ?BAR.
                        is_test_target: None,
                    },
                ),
-                None,
-            ),
+                applicable_files: None,
+                gen_src_files: None,
+            },
             SourceRootId(
                 1,
-            ): (
-                None,
-                None,
-            ),
+            ): AppMapData {
+                app_data: None,
+                applicable_files: None,
+                gen_src_files: None,
+            },
         },
         project_map: {
             ProjectId(
                 0,
@@ -834,13 +900,10 @@ foo() -> ?BAR.
                 eqwalizer_config: EqwalizerConfig {
                     enable_all: true,
                     max_tasks: 4,
+                    ignore_modules: [],
+                    ignore_modules_compiled_patterns: [],
                 },
-                include_mapping: Some(
-                    IncludeMapping {
-                        includes: {},
-                        deps: {},
-                    },
-                ),
+                include_mapping: None,
             },
             ProjectId(
                 1,
@@ -862,13 +925,10 @@ foo() -> ?BAR.
                 eqwalizer_config: EqwalizerConfig {
                     enable_all: true,
                     max_tasks: 4,
+                    ignore_modules: [],
+                    ignore_modules_compiled_patterns: [],
                 },
-                include_mapping: Some(
-                    IncludeMapping {
-                        includes: {},
-                        deps: {},
-                    },
-                ),
+                include_mapping: None,
             },
         },
         catch_all_source_root: SourceRootId(
diff --git a/crates/base_db/src/include.rs b/crates/base_db/src/include.rs
index adad605cbf..685989edb4 100644
--- a/crates/base_db/src/include.rs
+++ b/crates/base_db/src/include.rs
@@ -10,6 +10,8 @@
 
 use std::sync::Arc;
 
+use elp_project_model::AppName;
+use elp_project_model::buck::IncludeMappingScope;
 use elp_syntax::SmolStr;
 use vfs::FileId;
 use vfs::VfsPath;
@@ -22,30 +24,53 @@ use crate::SourceRoot;
 pub struct IncludeCtx<'a> {
     db: &'a dyn SourceDatabase,
     source_root: Arc<SourceRoot>,
-    pub file_id: FileId,
+    /// The starting .erl file when resolving includes
+    pub orig_file_id: Option<FileId>,
+    /// The current `FileId`. This starts out the same as
+    /// `orig_file_id`, but will change if a nested include file is
+    /// processed. The dependency graph for includes is calculated
+    /// based on the `orig_file_id`, if set.
+    pub current_file_id: FileId,
 }
 
 impl<'a> IncludeCtx<'a> {
-    pub fn new(db: &'a dyn SourceDatabase, file_id: FileId) -> Self {
+    pub fn new(
+        db: &'a dyn SourceDatabase,
+        orig_file_id: Option<FileId>,
+        current_file_id: FileId,
+    ) -> Self {
         // Context for T171541590
-        let _ = stdx::panic_context::enter(format!("\nIncludeCtx::new: {:?}", file_id));
-        let source_root_id = db.file_source_root(file_id);
+        let _ = stdx::panic_context::enter(format!(
+            "\nIncludeCtx::new: {orig_file_id:?} {current_file_id:?}"
+        ));
+        let source_root_id = db.file_source_root(current_file_id);
         let source_root = db.source_root(source_root_id);
         Self {
             db,
-            file_id,
+            orig_file_id,
+            current_file_id,
             source_root,
         }
     }
 
     pub fn resolve_include(&self, path: &str) -> Option<FileId> {
+        // Note, from https://www.erlang.org/doc/apps/erts/erlc_cmd#generally-useful-flags
+        // When encountering an `-include` or `-include_lib` directive,
+        // the compiler searches for header files in the following directories:
+        //
+        // - ".", the current working directory of the file server
+        // - The base name of the compiled file
+        // - The directories specified using option -I; the directory
+        //   specified last is searched first
         self.resolve_relative(path)
-            .or_else(|| self.db.resolve_local(self.file_id, path.into()))
+            .or_else(|| self.db.resolve_local(self.current_file_id, path.into()))
     }
 
     pub fn resolve_include_lib(&self, path: &str) -> Option<FileId> {
-        self.resolve_include(path)
-            .or_else(|| self.db.resolve_remote(self.file_id, path.into()))
+        self.resolve_include(path).or_else(|| {
+            self.db
+                .resolve_remote(self.orig_file_id, self.current_file_id, path.into())
+        })
     }
 
     pub fn resolve_include_doc(&self, path: &str) -> Option<FileId> {
@@ -53,53 +78,131 @@ impl<'a> IncludeCtx<'a> {
     }
 
     fn resolve_relative(&self, path: &str) -> Option<FileId> {
-        self.source_root.relative_path(self.file_id, path)
+        self.source_root.relative_path(self.current_file_id, path)
     }
 
-    /// Called via salsa for inserting in the graph
+    /// Called via salsa for inserting in the graph. We are looking
+    /// for a base filename in the includes of the current app (from
+    /// the `file_id`) or any of its dependencies
     pub(crate) fn resolve_local_query(
         db: &dyn SourceDatabase,
         file_id: FileId,
         path: SmolStr,
     ) -> Option<FileId> {
         let project_id = db.file_project_id(file_id)?;
-        if let Some(file_id) = db.mapped_include_file(project_id, path.clone()) {
+        let app_data = db.file_app_data(file_id)?;
+        if let Some(file_id) = db.mapped_include_file(
+            project_id,
+            IncludeMappingScope::Local(app_data.name.clone()),
+            path.clone(),
+        ) {
             Some(file_id)
         } else {
-            let path: &str = &path;
-            let app_data = db.file_app_data(file_id)?;
-            app_data.include_path.iter().find_map(|include| {
-                let name = include.join(path);
-                db.include_file_id(app_data.project_id, VfsPath::from(name.clone()))
-            })
+            // Not in the current app, look in the dependencies
+            let include_file_index = db.include_file_index(project_id);
+            if let Some(file_path) = include_file_index
+                .include_mapping
+                .find_local(&app_data.name, &path)
+            {
+                include_file_index
+                    .path_to_file_id
+                    .get(&VfsPath::from(file_path.clone()))
+                    .copied()
+            } else {
+                // Fallback for non-buck2 projects
+                let path: &str = &path;
+                app_data.include_path.iter().find_map(|include| {
+                    let name = include.join(path);
+                    db.include_file_id(app_data.project_id, VfsPath::from(name.clone()))
+                })
+            }
         }
     }
 
     /// Called via salsa for inserting in the graph
+    /// When processing a .erl file, it can include other files, and so on recursively.
+    /// In this case, the starting file is the `orig_file_id`, and the current file is
+    /// the one being processed.
     pub(crate) fn resolve_remote_query(
         db: &dyn SourceDatabase,
-        file_id: FileId,
+        orig_file_id: Option<FileId>,
+        current_file_id: FileId,
         path: SmolStr,
     ) -> Option<FileId> {
-        let project_id = db.file_project_id(file_id)?;
+        let project_id = db.file_project_id(current_file_id)?;
         let project_data = db.project_data(project_id);
-        let include = if let Some(include_mapping) = &project_data.include_mapping {
-            include_mapping
-                .get(&path)
+        // `app_data` represents the app that is doing the including.
+        // If `orig_file_id` is set, we are possibly processing a
+        // nested include file. In this case we must do our checking
+        // based on its app data.
+        let app_data = orig_file_id
+            .map(|file_id| db.file_app_data(file_id))
+            .unwrap_or_else(|| db.file_app_data(current_file_id))?;
+        let (app_name, include_path) = path.split_once('/')?;
+        let source_root_id = project_data.app_roots.get(app_name)?;
+        let target_app_data = db.app_data(source_root_id)?;
+        if let Some(include_mapping) = &project_data.include_mapping {
+            if let Some(p) = include_mapping
+                .get(IncludeMappingScope::Remote, &path)
                 .map(|path| db.include_file_id(project_id, VfsPath::from(path.clone())))
+            {
+                if p.is_some() {
+                    // We have an entry in the include mapping, and it maps to a FileId
+                    if let Some(target_full_name) = &app_data.buck_target_name {
+                        // We have an entry for the lookup, only return it
+                        // if it is in the dependencies
+                        if include_mapping.is_dep(target_full_name, &AppName(app_name.to_string()))
+                        {
+                            p
+                        } else {
+                            // We have a lookup value, but it is not a
+                            // dependency, do not do fallback processing
+                            None
+                        }
+                    } else {
+                        // This should not be possible. We only have
+                        // an include mapping for a buck project, and
+                        // so the `buck_target_name` should be
+                        // populated.
+                        log::warn!(
+                            "include mapping without buck_target_name: app:{:?}, path:{}",
+                            &app_data.name,
+                            &path
+                        );
+                        None
+                    }
+                } else {
+                    // We do have an entry in the include mapping, but
+                    // it does not resolve to a valid FileId.
+                    // This should also not happen.
+                    log::warn!(
+                        "include mapping does not resolve to FileId: app:{:?}, path:{}, p:{:?}",
+                        &app_data.name,
+                        &path,
+                        &p
+                    );
+                    None
+                }
+            } else {
+                // We did not find an entry in the include mapping.
+                None
+            }
         } else {
-            None
-        };
-        include.unwrap_or_else(|| {
-            let (app_name, include_path) = path.split_once('/')?;
-            let source_root_id = project_data.app_roots.get(app_name)?;
-            let target_app_data = db.app_data(source_root_id)?;
+            // There is no include mapping.
+            // This is the path followed when it is not a buck2
+            // project, as those are currently the only ones that
+            // populate the include_mapping.
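+            // Illustrative sketch (hypothetical header, for exposition only):
+            // `-include_lib("kernel/include/file.hrl")` reaches this point as
+            // path = "kernel/include/file.hrl", so app_name = "kernel" and
+            // include_path = "include/file.hrl"; the header is then resolved
+            // against the target app's directory below.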
let path = target_app_data.dir.join(include_path); db.include_file_id(project_id, VfsPath::from(path.clone())) .or_else(|| { find_generated_include_lib(db, project_id, include_path, &target_app_data) }) - }) + } } } @@ -155,7 +253,8 @@ pub fn generated_file_include_lib( .iter() .find_map(|dir| include_path.as_path()?.strip_prefix(dir))?; let candidate = format!("{}/include/{}", inc_app_data.name, candidate_path.as_str()); - let resolved_file_id = IncludeCtx::new(db, file_id).resolve_include_lib(&candidate)?; + let resolved_file_id = + IncludeCtx::new(db, Some(file_id), file_id).resolve_include_lib(&candidate)?; if resolved_file_id == included_file_id { // We have an equivalent include Some(candidate) diff --git a/crates/base_db/src/input.rs b/crates/base_db/src/input.rs index f86dae5f61..eb914e2a7d 100644 --- a/crates/base_db/src/input.rs +++ b/crates/base_db/src/input.rs @@ -15,12 +15,13 @@ use std::sync::Arc; use elp_project_model::AppName; use elp_project_model::AppType; -use elp_project_model::ApplicableFiles; use elp_project_model::EqwalizerConfig; use elp_project_model::Project; use elp_project_model::ProjectAppData; use elp_project_model::buck::IncludeMapping; +use elp_project_model::buck::TargetFullName; use fxhash::FxHashMap; +use fxhash::FxHashSet; use paths::RelPath; use paths::Utf8Path; use vfs::AbsPathBuf; @@ -124,6 +125,8 @@ pub struct ProjectData { pub struct AppData { pub project_id: ProjectId, pub name: AppName, + /// Target name if this application originates from a buck target + pub buck_target_name: Option, pub dir: AbsPathBuf, /// Include directories belonging to this app only. Used for /// include_lib resolution @@ -176,11 +179,23 @@ impl AppData { /// Note that `AppStructure` is build-system agnostic #[derive(Debug, Clone, Default /* Serialize, Deserialize */)] pub struct AppStructure { - pub(crate) app_map: FxHashMap, Option)>, + pub(crate) app_map: FxHashMap, pub(crate) project_map: FxHashMap, pub(crate) catch_all_source_root: SourceRootId, } +#[derive(Debug, Clone, Default)] +pub struct AppMapData { + app_data: Option, // TODO: should this be Arc? 
+ applicable_files: Option>, + gen_src_files: Option>, +} + +pub struct ApplyOutput { + pub unresolved_app_id_paths: FxHashMap, + pub gen_src_inputs: FxHashMap, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)] pub struct AppDataId(pub u32); @@ -189,13 +204,20 @@ impl AppStructure { &mut self, source_root_id: SourceRootId, app_data: Option, - applicable_files: Option, + applicable_files: Option>, + gen_src_files: Option>, ) { - let prev = self - .app_map - .insert(source_root_id, (app_data, applicable_files)); + let prev = self.app_map.insert( + source_root_id, + AppMapData { + app_data, + applicable_files, + gen_src_files, + }, + ); assert!(prev.is_none()); } + pub fn add_project_data(&mut self, project_id: ProjectId, project_data: ProjectData) { let prev = self.project_map.insert(project_id, project_data); assert!(prev.is_none()); @@ -206,15 +228,16 @@ impl AppStructure { self, db: &mut dyn SourceDatabaseExt, resolve_file_id: &impl Fn(&AbsPathBuf) -> Option, - ) -> FxHashMap { + ) -> ApplyOutput { let mut app_index = AppDataIndex::default(); let mut app_data_id = AppDataId(0); let mut unresolved_paths = FxHashMap::default(); - for (source_root_id, (data, applicable_files)) in self.app_map { - let arc_data = data.map(Arc::new); + let mut gen_src_inputs = FxHashMap::default(); + for (source_root_id, app_map_data) in self.app_map { + let arc_data = app_map_data.app_data.map(Arc::new); db.set_app_data_by_id(app_data_id, arc_data); db.set_app_data_id(source_root_id, app_data_id); - if let Some(files) = applicable_files { + if let Some(files) = app_map_data.applicable_files { files.iter().for_each(|path| { if let Some(file_id) = resolve_file_id(path) { app_index.map.insert(file_id, app_data_id); @@ -223,6 +246,11 @@ impl AppStructure { } }) } + if let Some(files) = app_map_data.gen_src_files { + for file in files { + gen_src_inputs.insert(file.clone(), app_data_id); + } + } app_data_id = AppDataId(app_data_id.0 + 1); } for (project_id, project_data) in self.project_map { @@ -231,7 +259,10 @@ impl AppStructure { db.set_app_index(Arc::new(app_index)); db.set_catch_all_source_root(self.catch_all_source_root); - unresolved_paths + ApplyOutput { + unresolved_app_id_paths: unresolved_paths, + gen_src_inputs, + } } } @@ -365,6 +396,7 @@ impl<'a> ProjectApps<'a> { let input_data = AppData { project_id, name: app.name.clone(), + buck_target_name: app.buck_target_name.clone(), dir: app.dir.clone(), include_dirs: app.include_dirs.clone(), include_path: app.include_path.clone(), @@ -376,7 +408,12 @@ impl<'a> ProjectApps<'a> { ebin_path: app.ebin.clone(), is_test_target: app.is_test_target, }; - app_structure.add_app_data(root_id, Some(input_data), app.applicable_files.clone()); + app_structure.add_app_data( + root_id, + Some(input_data), + app.applicable_files.clone(), + app.gen_src_files.clone(), + ); } let mut app_roots = project_root_map.remove(&project_id).unwrap_or_default(); @@ -392,14 +429,14 @@ impl<'a> ProjectApps<'a> { otp_project_id: self.otp_project_id, app_roots, eqwalizer_config: project.eqwalizer_config.clone(), - include_mapping: Some(project.include_mapping.clone()), + include_mapping: project.include_mapping.clone(), }; app_structure.add_project_data(project_id, project_data); } // Final SourceRoot for out-of-project files log::info!("Final source root: {:?}", SourceRootId(app_idx)); - app_structure.add_app_data(SourceRootId(app_idx), None, None); + app_structure.add_app_data(SourceRootId(app_idx), None, None, None); app_structure.catch_all_source_root = 
SourceRootId(app_idx); app_structure } diff --git a/crates/base_db/src/lib.rs b/crates/base_db/src/lib.rs index 3ec2f75408..0cd8df74c9 100644 --- a/crates/base_db/src/lib.rs +++ b/crates/base_db/src/lib.rs @@ -13,6 +13,7 @@ use std::sync::Arc; use elp_project_model::AppName; use elp_project_model::buck::IncludeMapping; +use elp_project_model::buck::IncludeMappingScope; use elp_syntax::AstNode; use elp_syntax::Parse; use elp_syntax::SmolStr; @@ -31,7 +32,7 @@ mod module_index; // Public API pub mod fixture; -// @fb-only +// @fb-only: mod meta_only; pub mod test_utils; pub use change::Change; pub use elp_project_model::AppType; @@ -177,7 +178,12 @@ pub trait SourceDatabase: FileLoader + salsa::Database { fn include_file_id(&self, project_id: ProjectId, path: VfsPath) -> Option; - fn mapped_include_file(&self, project_id: ProjectId, path: SmolStr) -> Option; + fn mapped_include_file( + &self, + project_id: ProjectId, + scope: IncludeMappingScope, + path: SmolStr, + ) -> Option; #[salsa::input] fn project_data(&self, id: ProjectId) -> Arc; @@ -224,7 +230,12 @@ pub trait SourceDatabase: FileLoader + salsa::Database { fn resolve_local(&self, file_id: FileId, path: SmolStr) -> Option; #[salsa::invoke(IncludeCtx::resolve_remote_query)] - fn resolve_remote(&self, file_id: FileId, path: SmolStr) -> Option; + fn resolve_remote( + &self, + orig_file_id: Option, + current_file_id: FileId, + path: SmolStr, + ) -> Option; } fn app_data(db: &dyn SourceDatabase, id: SourceRootId) -> Option> { @@ -253,16 +264,13 @@ fn module_index(db: &dyn SourceDatabase, project_id: ProjectId) -> Arc Option { let include_file_index = db.include_file_index(project_id); - let file_path = include_file_index.include_mapping.get(&path)?; + let file_path = include_file_index.include_mapping.get(scope, &path)?; include_file_index .path_to_file_id .get(&VfsPath::from(file_path.clone())) @@ -421,7 +430,7 @@ fn is_otp(db: &dyn SourceDatabase, file_id: FileId) -> Option { fn is_test_suite_or_test_helper(db: &dyn SourceDatabase, file_id: FileId) -> Option { // Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nis_test_suite_or_test_helper: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nis_test_suite_or_test_helper: {file_id:?}")); let app_data = db.file_app_data(file_id)?; let root_id = db.file_source_root(file_id); let root = db.source_root(root_id); @@ -435,28 +444,28 @@ fn is_test_suite_or_test_helper(db: &dyn SourceDatabase, file_id: FileId) -> Opt fn file_app_type(db: &dyn SourceDatabase, file_id: FileId) -> Option { // Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nfile_app_type: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nfile_app_type: {file_id:?}")); let app_data = db.file_app_data(file_id)?; Some(app_data.app_type) } fn file_app_name(db: &dyn SourceDatabase, file_id: FileId) -> Option { // Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nfile_app_name: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nfile_app_name: {file_id:?}")); let app_data = db.file_app_data(file_id)?; Some(app_data.name.clone()) } fn file_project_id(db: &dyn SourceDatabase, file_id: FileId) -> Option { // Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nfile_project_id: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nfile_project_id: {file_id:?}")); let app_data = db.file_app_data(file_id)?; Some(app_data.project_id) } pub fn module_name(db: &dyn SourceDatabase, file_id: FileId) -> Option { 
// Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nmodule_name: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nmodule_name: {file_id:?}")); let app_data = db.file_app_data(file_id)?; let module_index = db.module_index(app_data.project_id); module_index.module_for_file(file_id).cloned() @@ -467,7 +476,7 @@ static ref IGNORED_SOURCES: Vec = { let regexes: Vec> = vec![ vec![Regex::new(r"^.*_SUITE_data/.+$").unwrap()], //ignore sources goes here - // @fb-only + // @fb-only: meta_only::ignored_sources_regexes() ]; regexes.into_iter().flatten().collect::>() }; @@ -475,7 +484,7 @@ static ref IGNORED_SOURCES: Vec = { fn file_kind(db: &dyn SourceDatabase, file_id: FileId) -> FileKind { // Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nfile_kind: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nfile_kind: {file_id:?}")); let source_root_id = db.file_source_root(file_id); let source_root = db.source_root(source_root_id); let ignored_path = source_root @@ -487,7 +496,7 @@ fn file_kind(db: &dyn SourceDatabase, file_id: FileId) -> FileKind { }) .unwrap_or(false); // Context for T171541590 - let _ = stdx::panic_context::enter(format!("\nfile_kind: {:?}", file_id)); + let _ = stdx::panic_context::enter(format!("\nfile_kind: {file_id:?}")); if ignored_path { // not part of the known project model, and on list of ignored // sources, do not process @@ -550,7 +559,7 @@ impl FileLoader for FileLoaderDelegate<&'_ T> { /// If the `input` string represents an atom, and needs quoting, quote /// it. -pub fn to_quoted_string(input: &str) -> Cow { +pub fn to_quoted_string(input: &str) -> Cow<'_, str> { fn is_valid_atom(input: &str) -> bool { let mut chars = input.chars(); chars.next().is_some_and(|c| c.is_lowercase()) diff --git a/crates/base_db/src/module_index.rs b/crates/base_db/src/module_index.rs index 389f98ce44..5f245dd9d5 100644 --- a/crates/base_db/src/module_index.rs +++ b/crates/base_db/src/module_index.rs @@ -35,7 +35,7 @@ impl ModuleName { self } - pub fn to_quoted_string(&self) -> Cow { + pub fn to_quoted_string(&self) -> Cow<'_, str> { to_quoted_string(self.as_str()) } } diff --git a/crates/base_db/src/test_utils.rs b/crates/base_db/src/test_utils.rs index fd7250d105..f0192a368a 100644 --- a/crates/base_db/src/test_utils.rs +++ b/crates/base_db/src/test_utils.rs @@ -22,10 +22,10 @@ pub use dissimilar::diff as __diff; /// `eprintln!()` macro in case of text inequality. #[macro_export] macro_rules! 
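// NOTE (context, not part of this patch): `expr_2021` is the edition-pinned
// expression fragment specifier; changing the matchers back to plain `expr`
// means the macro no longer requires a compiler that understands `expr_2021`,
// at the cost of matching the current edition's (slightly wider) expression
// grammar. Call sites are unchanged either way, e.g.:
//
//     assert_eq_text!(expected_text, actual_text, "while checking case {}", 1);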
assert_eq_text { - ($left:expr_2021, $right:expr_2021) => { + ($left:expr, $right:expr) => { assert_eq_text!($left, $right,) }; - ($left:expr_2021, $right:expr_2021, $($tt:tt)*) => {{ + ($left:expr, $right:expr, $($tt:tt)*) => {{ let left = $left; let right = $right; if left != right { @@ -46,8 +46,8 @@ pub fn format_diff(chunks: Vec) -> String { for chunk in chunks { let formatted = match chunk { dissimilar::Chunk::Equal(text) => text.into(), - dissimilar::Chunk::Delete(text) => format!("\x1b[41m{}\x1b[0m", text), - dissimilar::Chunk::Insert(text) => format!("\x1b[42m{}\x1b[0m", text), + dissimilar::Chunk::Delete(text) => format!("\x1b[41m{text}\x1b[0m"), + dissimilar::Chunk::Insert(text) => format!("\x1b[42m{text}\x1b[0m"), }; buf.push_str(&formatted); } diff --git a/crates/elp/Cargo.toml b/crates/elp/Cargo.toml index 66552ada1d..2a1b6e8b65 100644 --- a/crates/elp/Cargo.toml +++ b/crates/elp/Cargo.toml @@ -18,10 +18,10 @@ workspace = true [dependencies] elp_eqwalizer.workspace = true elp_ide.workspace = true +elp_ide_db.workspace = true elp_log.workspace = true elp_project_model.workspace = true elp_syntax.workspace = true -elp_text_edit.workspace = true hir.workspace = true always-assert.workspace = true diff --git a/crates/elp/build.rs b/crates/elp/build.rs index d55fabfa0b..be24e95092 100644 --- a/crates/elp/build.rs +++ b/crates/elp/build.rs @@ -33,7 +33,7 @@ fn main() { OffsetDateTime::from_unix_timestamp(timestamp).expect("parsing SOURCE_DATE_EPOCH") } Err(std::env::VarError::NotPresent) => OffsetDateTime::now_utc(), - Err(e) => panic!("Error getting SOURCE_DATE_EPOCH: {}", e), + Err(e) => panic!("Error getting SOURCE_DATE_EPOCH: {e}"), }; date.format(&date_format).expect("formatting date") } else { @@ -43,20 +43,14 @@ fn main() { let cargo_manifest_dir = env::var(CARGO_MANIFEST_DIR) .expect("CARGO_MANIFEST_DIR should be set automatically by cargo"); let eqwalizer_support_dir = match eqwalizer_dir { - Ok(eqwalizer_support_dir) => format!("{}/../eqwalizer_support", eqwalizer_support_dir), - Err(_) => format!( - "{}/../../../eqwalizer/eqwalizer_support", - cargo_manifest_dir - ), + Ok(eqwalizer_support_dir) => format!("{eqwalizer_support_dir}/../eqwalizer_support"), + Err(_) => format!("{cargo_manifest_dir}/../../../eqwalizer/eqwalizer_support"), }; println!("cargo:rerun-if-changed=build.rs"); - println!("cargo:rerun-if-env-changed={}", SOURCE_DATE_EPOCH); - println!("cargo:rerun-if-env-changed={}", CI); - println!("cargo:rustc-env=BUILD_ID={}", build_id); - println!( - "cargo:rustc-env={}={}", - EQWALIZER_SUPPORT_DIR, eqwalizer_support_dir - ); - println!("cargo:rerun-if-env-changed={}", EQWALIZER_DIR); + println!("cargo:rerun-if-env-changed={SOURCE_DATE_EPOCH}"); + println!("cargo:rerun-if-env-changed={CI}"); + println!("cargo:rustc-env=BUILD_ID={build_id}"); + println!("cargo:rustc-env={EQWALIZER_SUPPORT_DIR}={eqwalizer_support_dir}"); + println!("cargo:rerun-if-env-changed={EQWALIZER_DIR}"); } diff --git a/crates/elp/src/arc_types.rs b/crates/elp/src/arc_types.rs index a1b40a5b25..374dcdba2f 100644 --- a/crates/elp/src/arc_types.rs +++ b/crates/elp/src/arc_types.rs @@ -8,13 +8,14 @@ * above-listed licenses. 
 */

-/// Types as defined in https://www.internalfb.com/intern/wiki/Linting/adding-linters/#flow-type
-/// and https://www.internalfb.com/code/fbsource/[1238f73dac0efd4009443fee6a345a680dc9401b]/whatsapp/server/erl/tools/lint/arcanist.py?lines=17 /
+// @fb-only: /// Types as defined in https://www.internalfb.com/intern/wiki/Linting/adding-linters/#flow-type
+// @fb-only: /// and https://www.internalfb.com/code/fbsource/[1238f73dac0efd4009443fee6a345a680dc9401b]/whatsapp/server/erl/tools/lint/arcanist.py?lines=17
 use std::path::Path;

 use serde::Serialize;

 #[derive(Debug, Serialize, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
 pub struct Diagnostic {
     // Filepath
     path: String,
@@ -29,6 +30,7 @@ pub struct Diagnostic {
     original: Option<String>,
     replacement: Option<String>,
     description: Option<String>,
+    doc_path: Option<String>,
 }

 #[derive(Debug, Serialize, PartialEq, Eq)]
@@ -42,6 +44,7 @@ pub enum Severity {
 }

 impl Diagnostic {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         path: &Path,
         line: u32,
@@ -50,6 +53,7 @@ impl Diagnostic {
         name: String,
         description: String,
         original: Option<String>,
+        doc_path: Option<String>,
     ) -> Self {
         Diagnostic {
             path: path.display().to_string(), // lossy on Windows for unicode paths
@@ -61,6 +65,7 @@ impl Diagnostic {
             original,
             replacement: None,
             description: Some(description),
+            doc_path,
         }
     }
 }
diff --git a/crates/elp/src/bin/args.rs b/crates/elp/src/bin/args.rs
index f4a2d282a0..c9790e9314 100644
--- a/crates/elp/src/bin/args.rs
+++ b/crates/elp/src/bin/args.rs
@@ -11,13 +11,20 @@
 use std::cmp::Ordering;
 use std::env;
 use std::fs;
+use std::io::IsTerminal;
 use std::path::PathBuf;

+use anyhow::Result;
+use anyhow::bail;
 use bpaf::Bpaf;
 use bpaf::Parser;
 use bpaf::construct;
 use bpaf::long;
+use elp_ide::elp_ide_db::DiagnosticCode;
 use elp_project_model::buck::BuckQueryConfig;
+use hir::fold::MacroStrategy;
+use hir::fold::ParenStrategy;
+use hir::fold::Strategy;
 use itertools::Itertools;
 use serde::Deserialize;

@@ -62,6 +69,20 @@ pub struct ParseAllElp {
         guard(format_guard, "Please use json")
     )]
     pub format: Option<String>,
+    /// Report system memory usage and other statistics
+    #[bpaf(long("report-system-stats"))]
+    pub report_system_stats: bool,
+    /// Minimum severity level to report. Valid values: error, warning, weak_warning, information
+    #[bpaf(
+        argument("SEVERITY"),
+        complete(severity_completer),
+        fallback(None),
+        guard(
+            severity_guard,
+            "Please use error, warning, weak_warning, or information"
+        )
+    )]
+    pub severity: Option<String>,
 }

 #[derive(Clone, Debug, Bpaf)]
@@ -134,8 +155,6 @@ pub struct EqwalizeAll {
     /// Also eqwalize opted-in generated modules from project (deprecated)
     #[bpaf(hide)]
     pub include_generated: bool,
-    /// Also eqwalize test modules from project
-    pub include_tests: bool,
     /// Exit with a non-zero status code if any errors are found
     pub bail_on_error: bool,
     /// Print statistics when done
@@ -152,8 +171,6 @@ pub struct EqwalizeTarget {
     /// Also eqwalize opted-in generated modules from application (deprecated)
     #[bpaf(hide)]
     pub include_generated: bool,
-    /// Also eqwalize test modules from project
-    pub include_tests: bool,
     /// Exit with a non-zero status code if any errors are found
     pub bail_on_error: bool,
     /// target, like //erl/chatd/...
@@ -172,8 +189,6 @@ pub struct EqwalizeApp {
     /// Also eqwalize opted-in generated modules from project (deprecated)
     #[bpaf(hide)]
     pub include_generated: bool,
-    /// Also eqwalize test modules from project
-    pub include_tests: bool,
     /// Run with rebar
     pub rebar: bool,
     /// Exit with a non-zero status code if any errors are found
@@ -196,8 +211,6 @@ pub struct EqwalizeStats {
     /// Also eqwalize opted-in generated modules from project (deprecated)
     #[bpaf(hide)]
     pub include_generated: bool,
-    /// Also eqwalize test modules from project
-    pub include_tests: bool,
     /// If specified, use the provided CLI severity mapping instead of the default one
     pub use_cli_severity: bool,
 }
@@ -237,7 +250,7 @@ pub struct Lint {
     #[bpaf(argument("MODULE"))]
     pub module: Option<String>,
     /// Parse a single application from the project, not the entire project.
-    #[bpaf(argument("APP"))]
+    #[bpaf(long("app"), long("application"), argument("APP"))]
     pub app: Option<String>,
     /// Parse a single file from the project, not the entire project. This can be an include file or escript, etc.
     #[bpaf(argument("FILE"))]
     pub file: Option<String>,
@@ -265,8 +278,6 @@ pub struct Lint {
         guard(format_guard, "Please use json")
     )]
     pub format: Option<String>,
-    /// Optional prefix to prepend to each diagnostic file path. Only used when --format=json is set
-    pub prefix: Option<String>,
     /// Include diagnostics produced by erlc
     pub include_erlc_diagnostics: bool,

@@ -285,7 +296,7 @@ pub struct Lint {
     #[bpaf(argument("CODE"))]
     pub diagnostic_ignore: Option<String>,
     /// Filter out all reported diagnostics except this one, by code or label
-    #[bpaf(argument("CODE"))]
+    #[bpaf(argument("CODE"), complete(diagnostic_code_completer))]
     pub diagnostic_filter: Option<String>,
     #[bpaf(external(parse_experimental_diags))]
     pub experimental_diags: bool,
@@ -319,11 +330,106 @@ pub struct Lint {
     /// than one at a time.
     pub one_shot: bool,

+    /// Report system memory usage and other statistics
+    #[bpaf(long("report-system-stats"))]
+    pub report_system_stats: bool,
+
+    /// Disable streaming of diagnostics when applying fixes (collect all before printing)
+    pub no_stream: bool,
+
     /// Rest of args are space separated list of apps to ignore
     #[bpaf(positional("IGNORED_APPS"))]
     pub ignore_apps: Vec<String>,
 }

+#[derive(Clone, Debug, Bpaf)]
+pub struct Ssr {
+    /// Path to directory with project, or to a JSON file (defaults to `.`)
+    #[bpaf(argument("PROJECT"), fallback(PathBuf::from(".")))]
+    pub project: PathBuf,
+    /// Parse a single module from the project, not the entire project.
+    #[bpaf(argument("MODULE"))]
+    pub module: Option<String>,
+    /// Parse a single application from the project, not the entire project.
+    #[bpaf(long("app"), long("application"), argument("APP"))]
+    pub app: Option<String>,
+    /// Parse a single file from the project, not the entire project. This can be an include file or escript, etc.
+    #[bpaf(argument("FILE"))]
+    pub file: Option<String>,
+
+    /// Run with rebar
+    pub rebar: bool,
+    /// Rebar3 profile to pickup (default is test)
+    #[bpaf(long("as"), argument("PROFILE"), fallback("test".to_string()))]
+    pub profile: String,
+
+    /// Also generate diagnostics for generated files
+    pub include_generated: bool,
+    /// Also generate diagnostics for test files
+    pub include_tests: bool,
+
+    /// Show diagnostics in JSON format
+    #[bpaf(
+        argument("FORMAT"),
+        complete(format_completer),
+        fallback(None),
+        guard(format_guard, "Please use json")
+    )]
+    pub format: Option<String>,
+
+    /// Macro expansion strategy: expand | no-expand | visible-expand (default expand)
+    #[bpaf(
+        long("macros"),
+        argument("STRATEGY"),
+        complete(macros_completer),
+        fallback(None),
+        guard(macros_guard, "Please supply a valid macro expansion value")
+    )]
+    pub macro_strategy: Option<String>,
+
+    /// Explicitly match parentheses. If omitted, they are ignored.
+    #[bpaf(long("parens"))]
+    pub paren_strategy: bool,
+
+    /// Dump a configuration snippet that can be put in .elp_lint.toml to match the given SSR patterns
+    pub dump_config: bool,
+
+    /// Show source code context for matches
+    #[bpaf(long("show-source"))]
+    pub show_source: bool,
+
+    /// Print NUM lines of leading context, enables --show-source
+    #[bpaf(short('B'), long("before-context"), argument("NUM"))]
+    pub before_context: Option,
+
+    /// Print NUM lines of trailing context, enables --show-source
+    #[bpaf(short('A'), long("after-context"), argument("NUM"))]
+    pub after_context: Option,
+
+    /// Print NUM lines of output context, enables --show-source
+    #[bpaf(short('C'), long("context"), argument("NUM"))]
+    pub context: Option,
+
+    /// Print SEP on line between matches with context, enables --show-source
+    #[bpaf(long("group-separator"), argument("SEP"))]
+    pub group_separator: Option<String>,
+
+    /// Do not print separator for matches with context, enables --show-source
+    #[bpaf(long("no-group-separator"))]
+    pub no_group_separator: bool,
+
+    /// Report system memory usage and other statistics
+    #[bpaf(long("report-system-stats"))]
+    pub report_system_stats: bool,
+
+    /// SSR specs to use
+    #[bpaf(
+        positional("SSR_SPECS"),
+        guard(at_least_1, "there should be at least one spec")
+    )]
+    pub ssr_specs: Vec<String>,
+}
+
 #[derive(Clone, Debug, Bpaf)]
 pub struct Explain {
     /// Error code to explain
@@ -348,6 +454,8 @@ pub struct ProjectInfo {
     pub to: Option<PathBuf>,
     /// Include the buck uquery results in the output
     pub buck_query: bool,
+    /// Dump a list of targets and their types
+    pub target_types: bool,
 }

 #[derive(Clone, Debug, Bpaf)]
@@ -366,8 +474,6 @@ pub struct Glean {
     pub pretty: bool,
     /// Output each fact separately
     pub multi: bool,
-    /// Optional prefix to prepend to each fact
-    pub prefix: Option<String>,
 }

 #[derive(Clone, Debug, Bpaf)]
@@ -387,6 +493,7 @@ pub enum Command {
     GenerateCompletions(GenerateCompletions),
     RunServer(RunServer),
     Lint(Lint),
+    Ssr(Ssr),
     Version(Version),
     Shell(Shell),
     Explain(Explain),
@@ -407,25 +514,50 @@ pub struct Args {
     pub escript: Option,
     pub no_log_buffering: bool,

-    /// When using buck, invoke a build step for generated files.
-    #[allow(dead_code)] // Until T208401551 done
-    pub buck_generated: bool,
-
     /// When using buck, do not invoke a build step for generated files.
     pub no_buck_generated: bool,
+    /// Use buck2 targets for first stage project loading
+    pub buck_quick_start: bool,
+
+    /// Use color in output; WHEN is 'always', 'never', or 'auto'
+    #[bpaf(
+        long("color"),
+        long("colour"),
+        argument("WHEN"),
+        fallback(Some("always".to_string())),
+        guard(color_guard, "Please use always, never, or auto")
+    )]
+    pub color: Option<String>,
+
     #[bpaf(external(command))]
     pub command: Command,
 }

 impl Args {
     pub fn query_config(&self) -> BuckQueryConfig {
-        if self.no_buck_generated {
+        if self.buck_quick_start {
+            BuckQueryConfig::BuckTargetsOnly
+        } else if self.no_buck_generated {
             BuckQueryConfig::NoBuildGeneratedCode
         } else {
             BuckQueryConfig::BuildGeneratedCode
         }
     }
+
+    /// Determine if color should be used based on the --color argument
+    pub fn should_use_color(&self) -> bool {
+        match self.color.as_deref() {
+            Some("always") => true,
+            Some("never") => false,
+            Some("auto") | None => {
+                // Check NO_COLOR environment variable - if set (regardless of value), disable color
+                // Also check if stdout is connected to a TTY
+                env::var("NO_COLOR").is_err() && std::io::stdout().is_terminal()
+            }
+            _ => false, // Should be caught by the guard, but handle anyway
+        }
+    }
 }

 pub fn command() -> impl Parser<Command> {
@@ -469,7 +601,8 @@
         .map(Command::EqwalizeStats)
         .to_options()
         .command("eqwalize-stats")
-        .help("Return statistics about code quality for eqWAlizer");
+        .help("Return statistics about code quality for eqWAlizer")
+        .hide();

     let dialyze_all = dialyze_all()
         .map(Command::DialyzeAll)
@@ -496,6 +629,18 @@
         .command("lint")
         .help("Parse files in project and emit diagnostics, optionally apply fixes.");

+    let search = ssr()
+        .map(Command::Ssr)
+        .to_options()
+        .command("search")
+        .help("Alias for 'ssr': Run SSR (Structural Search and Replace) pattern matching on project files.");
+
+    let ssr = ssr()
+        .map(Command::Ssr)
+        .to_options()
+        .command("ssr")
+        .help("Run SSR (Structural Search and Replace) pattern matching on project files.");
+
     let run_server = run_server()
         .map(Command::RunServer)
         .to_options()
@@ -539,23 +684,26 @@
         .help("Dump a JSON config stanza suitable for use in VS Code project.json");

     construct!([
+        // Note: The order here is what is used for `elp --help` output
+        version,
+        run_server,
+        shell,
         eqwalize,
         eqwalize_all,
         eqwalize_app,
         eqwalize_target,
+        eqwalize_stats,
         dialyze_all,
         lint,
-        run_server,
-        generate_completions,
+        ssr,
+        search,
         parse_all,
         parse_elp,
-        build_info,
-        version,
-        shell,
-        eqwalize_stats,
         explain,
+        build_info,
         project_info,
         glean,
+        generate_completions,
         config_stanza,
     ])
     .fallback(Help())
@@ -592,11 +740,11 @@ fn module_completer(input: &String) -> Vec<(String, Option<String>)> {
             potential_path = path.parent();
             continue;
         } else {
-            if let Ok(content) = fs::read_to_string(file_path) {
-                if let Ok(config) = toml::from_str::(&content) {
-                    for module_name in config.modules.into_iter() {
-                        modules.push(module_name)
-                    }
+            if let Ok(content) = fs::read_to_string(file_path)
+                && let Ok(config) = toml::from_str::(&content)
+            {
+                for module_name in config.modules.into_iter() {
+                    modules.push(module_name)
                 }
             }
             break;
         }
     }
     get_suggesions(input, modules)
 }

+fn diagnostic_code_completer(input: &Option<String>) -> Vec<(String, Option<String>)> {
+    let codes: Vec<String> = DiagnosticCode::codes_iter()
+        .filter(|code| match code {
+            DiagnosticCode::DefaultCodeForEnumIter
+            | DiagnosticCode::ErlangService(_)
+            | DiagnosticCode::Eqwalizer(_)
+            | DiagnosticCode::AdHoc(_) => false,
+            _ => true,
+        })
+        .flat_map(|code| vec![code.as_code().to_string(), code.as_label().to_string()])
+        .collect();
+    codes
+        .into_iter()
+        .filter(|code| match input {
+            None => true,
+            Some(prefix) => code.starts_with(prefix),
+        })
+        .map(|c| (c.to_string(), None))
+        .collect::<Vec<_>>()
+}
+
 fn format_completer(_: &Option<String>) -> Vec<(String, Option<String>)> {
     vec![("json".to_string(), None)]
 }
@@ -617,6 +786,48 @@ fn format_guard(format: &Option<String>) -> bool {
     match format {
         None => true,
     }
 }

+fn severity_completer(_: &Option<String>) -> Vec<(String, Option<String>)> {
+    vec![
+        ("error".to_string(), None),
+        ("warning".to_string(), None),
+        ("weak_warning".to_string(), None),
+        ("information".to_string(), None),
+    ]
+}
+
+fn severity_guard(severity: &Option<String>) -> bool {
+    match severity {
+        None => true,
+        Some(s) if s == "error" || s == "warning" || s == "weak_warning" || s == "information" => {
+            true
+        }
+        _ => false,
+    }
+}
+
+fn macros_completer(_: &Option<String>) -> Vec<(String, Option<String>)> {
+    vec![
+        ("expand".to_string(), None),
+        ("no-expand".to_string(), None),
+        ("visible-expand".to_string(), None),
+    ]
+}
+
+fn macros_guard(format: &Option<String>) -> bool {
+    match format {
+        None => true,
+        Some(_) => parse_macro_strategy(format).is_ok(),
+    }
+}
+
+fn color_guard(color: &Option<String>) -> bool {
+    match color {
+        None => true,
+        Some(c) if c == "always" || c == "never" || c == "auto" => true,
+        _ => false,
+    }
+}
+
 #[allow(clippy::ptr_arg)] // This is needed in the BPAF macros
 fn at_least_1(data: &Vec<String>) -> bool {
     !data.is_empty()
 }
@@ -697,6 +908,44 @@ impl Lint {
     pub fn is_format_json(&self) -> bool {
         self.format == Some("json".to_string())
     }
+
+    /// To prevent flaky test results we allow disabling streaming when applying fixes
+    pub fn skip_stream_print(&self) -> bool {
+        self.apply_fix || self.no_stream
+    }
+}
+
+fn parse_macro_strategy(macro_strategy: &Option<String>) -> Result<MacroStrategy> {
+    match macro_strategy.as_deref() {
+        Some("no-expand") => Ok(MacroStrategy::DoNotExpand),
+        Some("expand") => Ok(MacroStrategy::Expand),
+        Some("visible-expand") => Ok(MacroStrategy::ExpandButIncludeMacroCall),
+        None => Ok(MacroStrategy::Expand),
+        Some(s) => bail!(
+            "Invalid macro strategy '{}'. 
Valid options are: expand, no-expand, visible-expand", + s + ), + } +} + +impl Ssr { + pub fn is_format_normal(&self) -> bool { + self.format.is_none() + } + + pub fn is_format_json(&self) -> bool { + self.format == Some("json".to_string()) + } + + pub fn parse_strategy(&self) -> Result { + let macros = parse_macro_strategy(&self.macro_strategy)?; + let parens = if self.paren_strategy { + ParenStrategy::VisibleParens + } else { + ParenStrategy::InvisibleParens + }; + Ok(Strategy { macros, parens }) + } } impl ParseAllElp { diff --git a/crates/elp/src/bin/build_info_cli.rs b/crates/elp/src/bin/build_info_cli.rs index 9029927542..2308acb2a7 100644 --- a/crates/elp/src/bin/build_info_cli.rs +++ b/crates/elp/src/bin/build_info_cli.rs @@ -15,15 +15,18 @@ use std::io::Write; use anyhow::Result; use elp_ide::elp_ide_db::elp_base_db::AbsPath; use elp_ide::elp_ide_db::elp_base_db::AbsPathBuf; +use elp_project_model::AppType; use elp_project_model::ElpConfig; -use elp_project_model::EqwalizerConfig; use elp_project_model::IncludeParentDirs; use elp_project_model::Project; +use elp_project_model::ProjectAppData; use elp_project_model::ProjectBuildData; use elp_project_model::ProjectManifest; use elp_project_model::buck::BuckQueryConfig; -use elp_project_model::buck::query_buck_targets_bxl; +use elp_project_model::buck::BuckTarget; +use elp_project_model::buck::query_buck_targets; use elp_project_model::json::JsonConfig; +use fxhash::FxHashMap; use crate::args::BuildInfo; use crate::args::ProjectInfo; @@ -31,8 +34,8 @@ use crate::args::ProjectInfo; pub(crate) fn save_build_info(args: BuildInfo, query_config: &BuckQueryConfig) -> Result<()> { let root = fs::canonicalize(&args.project)?; let root = AbsPathBuf::assert_utf8(root); - let (_elp_config, manifest) = ProjectManifest::discover(&root)?; - let project = Project::load(&manifest, EqwalizerConfig::default(), query_config, &|_| {})?; + let (elp_config, manifest) = ProjectManifest::discover(&root)?; + let project = Project::load(&manifest, &elp_config, query_config, &|_| {})?; let mut writer = File::create(&args.to)?; let json_str = serde_json::to_string_pretty::(&project.as_json(root))?; writer.write_all(json_str.as_bytes())?; @@ -63,28 +66,71 @@ pub(crate) fn save_project_info(args: ProjectInfo, query_config: &BuckQueryConfi } }; - if args.buck_query { - if let ProjectBuildData::Buck(buck) = &project.project_build_data { - let buck_targets_query = query_buck_targets_bxl(&buck.buck_conf, query_config); - writer.write_all(b"================buck targets query raw================\n")?; + if args.buck_query + && let ProjectBuildData::Buck(buck) = &project.project_build_data + { + let buck_targets_query = query_buck_targets(&buck.buck_conf, query_config); + if let Ok(targets) = &buck_targets_query { + writer.write_all(format!("{:#?}\n", sort_buck_targets(targets)).as_bytes())?; + } else { writer.write_all(format!("{:#?}\n", &buck_targets_query).as_bytes())?; - }; + } + } else if args.target_types { + writer.write_all(b"================target types================\n")?; + for line in buck_targets_and_types(&project.project_apps) { + writer.write_all(format!("{}\n", line).as_bytes())?; + } + } else { + writer.write_all(b"================manifest================\n")?; + writer.write_all(format!("{:#?}\n", &manifest).as_bytes())?; + writer.write_all(b"================project_build_data================\n")?; + writer.write_all(format!("{:#?}\n", &project.project_build_data).as_bytes())?; + 
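// NOTE (illustrative, not part of this patch): `Ssr::parse_strategy` folds the
// two CLI knobs into a single `hir::fold::Strategy`. Hypothetical usage, with
// `args` a parsed `Ssr` value:
//
//     let strategy = args.parse_strategy()?;
//     // --macros defaults to MacroStrategy::Expand, and without --parens the
//     // paren strategy is ParenStrategy::InvisibleParens.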
writer.write_all(b"================project_app_data================\n")?; + writer.write_all(format!("{:#?}\n", &project.project_apps).as_bytes())?; } - writer.write_all(b"================manifest================\n")?; - writer.write_all(format!("{:#?}\n", &manifest).as_bytes())?; - writer.write_all(b"================project_build_data================\n")?; - writer.write_all(format!("{:#?}\n", &project.project_build_data).as_bytes())?; - writer.write_all(b"================project_app_data================\n")?; - writer.write_all(format!("{:#?}\n", &project.project_apps).as_bytes())?; Ok(()) } +fn sort_buck_targets(hash_map: &FxHashMap) -> Vec<(String, &BuckTarget)> { + let mut vec = hash_map + .iter() + .map(|(n, t)| (format!("target_name:{}", n), t)) + .collect::>(); + vec.sort_by(|a, b| a.0.cmp(&b.0)); + vec +} + +fn buck_targets_and_types(apps: &[ProjectAppData]) -> Vec { + let tn = |tn| -> String { + if let Some(tn) = tn { + tn + } else { + "".to_string() + } + }; + let mut vec = apps + .iter() + .filter(|app| app.app_type != AppType::Otp) + .filter(|app| app.is_buck_generated != Some(true)) + .map(|app| { + format!( + "{:?} {:<30} {}", + app.app_type, + app.name, + tn(app.buck_target_name.clone()) + ) + }) + .collect::>(); + vec.sort(); + vec +} + fn load_project( root: &AbsPath, query_config: &BuckQueryConfig, ) -> Result<(ProjectManifest, Project)> { let (elp_config, manifest) = ProjectManifest::discover(root)?; - let project = Project::load(&manifest, elp_config.eqwalizer, query_config, &|_| {})?; + let project = Project::load(&manifest, &elp_config, query_config, &|_| {})?; Ok((manifest, project)) } @@ -94,6 +140,6 @@ fn load_fallback( ) -> Result<(ProjectManifest, Project)> { let manifest = ProjectManifest::discover_no_manifest(root, IncludeParentDirs::Yes); let elp_config = ElpConfig::default(); - let project = Project::load(&manifest, elp_config.eqwalizer, query_config, &|_| {})?; + let project = Project::load(&manifest, &elp_config, query_config, &|_| {})?; Ok((manifest, project)) } diff --git a/crates/elp/src/bin/config_stanza.rs b/crates/elp/src/bin/config_stanza.rs index 97e76f1b4f..a0f0650c6b 100644 --- a/crates/elp/src/bin/config_stanza.rs +++ b/crates/elp/src/bin/config_stanza.rs @@ -16,5 +16,5 @@ use crate::args::ConfigStanza; pub fn config_stanza(_args: &ConfigStanza, cli: &mut dyn Cli) -> Result<()> { let schema = format!("{:#}", Config::json_schema()); - Ok(writeln!(cli, "{}", schema)?) + Ok(writeln!(cli, "{schema}")?) 
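// NOTE (illustrative, not part of this patch): with `--target-types`,
// `buck_targets_and_types` emits one sorted line per non-OTP, non-generated
// app using the "{:?} {:<30} {}" format above, i.e. app type, padded app
// name, then the buck target name. The values below are invented for
// illustration:
//
//     App chatd                          //erl/chatd:chatd
//     App chatd_tests                    //erl/chatd:chatd_tests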
} diff --git a/crates/elp/src/bin/elp_parse_cli.rs b/crates/elp/src/bin/elp_parse_cli.rs index e8d36aa247..770ab60456 100644 --- a/crates/elp/src/bin/elp_parse_cli.rs +++ b/crates/elp/src/bin/elp_parse_cli.rs @@ -14,6 +14,7 @@ use std::io::Write; use std::path::Path; use std::path::PathBuf; use std::str; +use std::time::SystemTime; use anyhow::Result; use anyhow::bail; @@ -21,8 +22,8 @@ use elp::build::load; use elp::build::types::LoadResult; use elp::cli::Cli; use elp::convert; +use elp::memory_usage::MemoryUsage; use elp::otp_file_to_ignore; -use elp::server::file_id_to_url; use elp_eqwalizer::Mode; use elp_ide::Analysis; use elp_ide::diagnostics; @@ -39,6 +40,7 @@ use elp_ide::elp_ide_db::elp_base_db::IncludeOtp; use elp_ide::elp_ide_db::elp_base_db::ModuleName; use elp_ide::elp_ide_db::elp_base_db::Vfs; use elp_ide::elp_ide_db::elp_base_db::VfsPath; +use elp_log::telemetry; use elp_project_model::AppType; use elp_project_model::DiscoverConfig; use elp_project_model::buck::BuckQueryConfig; @@ -53,6 +55,36 @@ use vfs::AbsPath; use crate::args::ParseAllElp; use crate::reporting; +use crate::reporting::print_memory_usage; + +fn parse_severity(severity: &str) -> Option { + match severity { + "error" => Some(diagnostics::Severity::Error), + "warning" => Some(diagnostics::Severity::Warning), + "weak_warning" => Some(diagnostics::Severity::WeakWarning), + "information" => Some(diagnostics::Severity::Information), + _ => None, + } +} + +fn severity_rank(severity: diagnostics::Severity) -> u8 { + match severity { + diagnostics::Severity::Error => 1, + diagnostics::Severity::Warning => 2, + diagnostics::Severity::WeakWarning => 3, + diagnostics::Severity::Information => 4, + } +} + +fn meets_severity_threshold( + diag_severity: diagnostics::Severity, + min_severity: Option, +) -> bool { + match min_severity { + None => true, + Some(min) => severity_rank(diag_severity) <= severity_rank(min), + } +} #[derive(Debug)] struct ParseResult { @@ -68,6 +100,10 @@ pub fn parse_all( ) -> Result<()> { log::info!("Loading project at: {:?}", args.project); + let start_time = SystemTime::now(); + // Track memory usage at the start + let memory_start = MemoryUsage::now(); + let config = DiscoverConfig::new(args.rebar, &args.profile); let loaded = load::load_project_at( cli, @@ -87,7 +123,7 @@ pub fn parse_all( let (file_id, name) = match &args.module { Some(module) => { if args.is_format_normal() { - writeln!(cli, "module specified: {}", module)?; + writeln!(cli, "module specified: {module}")?; } let file_id = analysis.module_file_id(loaded.project_id, module)?; (file_id, analysis.module_name(file_id.unwrap())?) @@ -96,7 +132,7 @@ pub fn parse_all( None => match &args.file { Some(file_name) => { if args.is_format_normal() { - writeln!(cli, "file specified: {}", file_name)?; + writeln!(cli, "file specified: {file_name}")?; } let path_buf = Utf8PathBuf::from_path_buf(fs::canonicalize(file_name).unwrap()) .expect("UTF8 conversion failed"); @@ -124,10 +160,9 @@ pub fn parse_all( (None, _, true) => do_parse_all_seq(cli, &loaded, &cfg, &args.to)?, (None, _, false) => do_parse_all_par(cli, &loaded, &cfg, &args.to)?, (Some(file_id), Some(name), _) => { - do_parse_one(&analysis, &loaded.vfs, &cfg, &args.to, file_id, &name)? 
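// NOTE (worked example, not part of this patch): `severity_rank` orders
// severities with 1 = most severe, and `meets_severity_threshold` keeps a
// diagnostic when its rank is <= the requested minimum, so `--severity warning`
// keeps errors and warnings but drops weak warnings:
//
//     use elp_ide::diagnostics::Severity;
//     assert!(meets_severity_threshold(Severity::Error, Some(Severity::Warning)));
//     assert!(meets_severity_threshold(Severity::Warning, Some(Severity::Warning)));
//     assert!(!meets_severity_threshold(Severity::WeakWarning, Some(Severity::Warning)));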
- .map_or(vec![], |x| vec![x]) + do_parse_one(&analysis, &cfg, &args.to, file_id, &name)?.map_or(vec![], |x| vec![x]) } - (Some(file_id), _, _) => panic!("Could not get name from file_id for {:?}", file_id), + (Some(file_id), _, _) => panic!("Could not get name from file_id for {file_id:?}"), }; if args.dump_include_resolutions { @@ -136,14 +171,32 @@ pub fn parse_all( let db = loaded.analysis_host.raw_database(); - // We need a `Url` for converting to the lsp_types::Diagnostic for - // printing, but do not print it out. So just create a dummy value - let url = lsp_types::Url::parse("file:///unused_url").ok().unwrap(); + telemetry::report_elapsed_time("parse-elp operational", start_time); + + let memory_end = MemoryUsage::now(); + let memory_used = memory_end - memory_start; + + let min_severity = args + .severity + .as_ref() + .and_then(|s| parse_severity(s.as_str())); + + res.retain(|parse_result| { + parse_result + .diagnostics + .diagnostics_for(parse_result.file_id) + .iter() + .any(|diag| meets_severity_threshold(diag.severity, min_severity)) + }); if res.is_empty() { if args.is_format_normal() { writeln!(cli, "No errors reported")?; } + if args.is_format_normal() && args.report_system_stats { + print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?; + writeln!(cli, "{}", memory_used)?; + } Ok(()) } else { if args.is_format_normal() { @@ -154,6 +207,7 @@ pub fn parse_all( for diags in res { let mut combined: Vec = diags.diagnostics.diagnostics_for(diags.file_id); + combined.retain(|diag| meets_severity_threshold(diag.severity, min_severity)); if args.is_format_normal() { writeln!(cli, " {}: {}", diags.name, combined.len())?; } @@ -180,11 +234,19 @@ pub fn parse_all( cli, )?; } else { - print_diagnostic(&diag, &line_index, &url, &mut err_in_diag, cli)?; + print_diagnostic(&diag, &line_index, &mut err_in_diag, cli)?; } } } } + + telemetry::report_elapsed_time("parse-elp done", start_time); + + if args.is_format_normal() && args.report_system_stats { + print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?; + writeln!(cli, "{}", memory_used)?; + } + if err_in_diag { bail!("Parse failures found") } else { @@ -208,8 +270,7 @@ fn print_diagnostic_json( cli, "{}", serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| panic!( - "print_diagnostics_json failed for '{:?}': {}", - converted_diagnostic, err + "print_diagnostics_json failed for '{converted_diagnostic:?}': {err}" )) )?; Ok(()) @@ -218,11 +279,10 @@ fn print_diagnostic_json( fn print_diagnostic( diag: &diagnostics::Diagnostic, line_index: &LineIndex, - url: &lsp_types::Url, err_in_diag: &mut bool, cli: &mut dyn Cli, ) -> Result<(), anyhow::Error> { - let diag = convert::ide_to_lsp_diagnostic(line_index, url, diag); + let diag = convert::ide_to_lsp_diagnostic(line_index, diag, |_file_id| None); let severity = match diag.severity { None => DiagnosticSeverity::ERROR, Some(sev) => { @@ -247,7 +307,7 @@ fn print_diagnostic( fn maybe_code_as_string(mc: Option) -> String { match mc { Some(ns) => match ns { - NumberOrString::Number(n) => format!("{}", n), + NumberOrString::Number(n) => format!("{n}"), NumberOrString::String(s) => s, }, None => "".to_string(), @@ -265,7 +325,6 @@ fn do_parse_all_par( let pb = cli.progress(module_iter.len() as u64, "Parsing modules"); - let vfs = &loaded.vfs; Ok(module_iter .par_bridge() .progress_with(pb) @@ -276,7 +335,7 @@ fn do_parse_all_par( && file_source == FileSource::Src && db.file_app_type(file_id).ok() != Some(Some(AppType::Dep)) { - do_parse_one(db, vfs, config, to, file_id, 
module_name.as_str()).unwrap() + do_parse_one(db, config, to, file_id, module_name.as_str()).unwrap() } else { None } @@ -297,7 +356,6 @@ fn do_parse_all_seq( let pb = cli.progress(module_iter.len() as u64, "Parsing modules (sequential)"); - let vfs = &loaded.vfs; let db = loaded.analysis(); Ok(module_iter .progress_with(pb) @@ -306,7 +364,7 @@ fn do_parse_all_seq( && file_source == FileSource::Src && db.file_app_type(file_id).ok() != Some(Some(AppType::Dep)) { - do_parse_one(&db, vfs, config, to, file_id, module_name.as_str()).unwrap() + do_parse_one(&db, config, to, file_id, module_name.as_str()).unwrap() } else { None } @@ -316,13 +374,11 @@ fn do_parse_all_seq( fn do_parse_one( db: &Analysis, - vfs: &Vfs, config: &DiagnosticsConfig, to: &Option, file_id: FileId, name: &str, ) -> Result> { - let url = file_id_to_url(vfs, file_id); let native = db.native_diagnostics(config, &vec![], file_id)?; let erlang_service_diagnostics = db.erlang_service_diagnostics(file_id, config, RemoveElpReported::Yes)?; @@ -336,16 +392,18 @@ fn do_parse_one( .unwrap_or(LabeledDiagnostics::default()); if let Some(to) = to { - let to_path = to.join(format!("{}.diag", name)); + let to_path = to.join(format!("{name}.diag")); let mut output = File::create(to_path)?; for diagnostic in native.iter() { - let diagnostic = convert::ide_to_lsp_diagnostic(&line_index, &url, diagnostic); - writeln!(output, "{:?}", diagnostic)?; + let diagnostic = + convert::ide_to_lsp_diagnostic(&line_index, diagnostic, |_file_id| None); + writeln!(output, "{diagnostic:?}")?; } for diagnostic in erlang_service.iter() { - let diagnostic = convert::ide_to_lsp_diagnostic(&line_index, &url, diagnostic); - writeln!(output, "{:?}", diagnostic)?; + let diagnostic = + convert::ide_to_lsp_diagnostic(&line_index, diagnostic, |_file_id| None); + writeln!(output, "{diagnostic:?}")?; } } if !(native.is_empty() && erlang_service.is_empty()) { diff --git a/crates/elp/src/bin/eqwalizer_cli.rs b/crates/elp/src/bin/eqwalizer_cli.rs index 23b078c1de..141b2157d0 100644 --- a/crates/elp/src/bin/eqwalizer_cli.rs +++ b/crates/elp/src/bin/eqwalizer_cli.rs @@ -10,6 +10,7 @@ use std::path::Path; use std::sync::Arc; +use std::time::SystemTime; use anyhow::Context; use anyhow::Result; @@ -38,6 +39,7 @@ use elp_ide::elp_ide_db::elp_base_db::FileId; use elp_ide::elp_ide_db::elp_base_db::IncludeOtp; use elp_ide::elp_ide_db::elp_base_db::ModuleName; use elp_ide::elp_ide_db::elp_base_db::VfsPath; +use elp_log::telemetry; use elp_project_model::AppName; use elp_project_model::DiscoverConfig; use elp_project_model::ProjectBuildData; @@ -76,6 +78,7 @@ pub fn eqwalize_module( cli: &mut dyn Cli, query_config: &BuckQueryConfig, ) -> Result<()> { + let start_time = SystemTime::now(); let config = DiscoverConfig::new(args.rebar, &args.profile); let mut loaded = load::load_project_at( cli, @@ -86,7 +89,10 @@ pub fn eqwalize_module( query_config, )?; build::compile_deps(&loaded, cli)?; - do_eqwalize_module(args, &mut loaded, cli) + telemetry::report_elapsed_time("eqwalize operational", start_time); + let r = do_eqwalize_module(args, &mut loaded, cli); + telemetry::report_elapsed_time("eqwalize done", start_time); + r } pub fn do_eqwalize_module( @@ -100,11 +106,10 @@ pub fn do_eqwalize_module( for module in &args.modules { let suggest_name = Path::new(module).file_stem().and_then(|name| name.to_str()); let context_str = match suggest_name { - Some(name) if name != module => format!( - "Module {} not found. 
Did you mean elp eqwalize {}?", - module, name - ), - _ => format!("Module {} not found", module), + Some(name) if name != module => { + format!("Module {module} not found. Did you mean elp eqwalize {name}?") + } + _ => format!("Module {module} not found"), }; let file_id = analysis .module_file_id(loaded.project_id, module)? @@ -144,6 +149,7 @@ pub fn eqwalize_all( cli: &mut dyn Cli, query_config: &BuckQueryConfig, ) -> Result<()> { + let start_time = SystemTime::now(); // Hack to avoid hint appearing in tests cli.spinner(SHELL_HINT).finish(); let config = DiscoverConfig::new(args.rebar, &args.profile); @@ -156,7 +162,10 @@ pub fn eqwalize_all( query_config, )?; build::compile_deps(&loaded, cli)?; - do_eqwalize_all(args, &mut loaded, cli) + telemetry::report_elapsed_time("eqwalize-all operational", start_time); + let r = do_eqwalize_all(args, &mut loaded, cli); + telemetry::report_elapsed_time("eqwalize-all done", start_time); + r } pub fn do_eqwalize_all( @@ -169,7 +178,7 @@ pub fn do_eqwalize_all( let module_index = analysis.module_index(loaded.project_id)?; let include_generated = args.include_generated; if include_generated { - write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?; + write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?; } let pb = cli.progress(module_index.len_own() as u64, "Gathering modules"); let file_ids: Vec = module_index @@ -177,10 +186,7 @@ pub fn do_eqwalize_all( .par_bridge() .progress_with(pb.clone()) .map_with(analysis.clone(), |analysis, (name, _source, file_id)| { - if analysis - .should_eqwalize(file_id, args.include_tests) - .unwrap() - && !otp_file_to_ignore(analysis, file_id) + if analysis.should_eqwalize(file_id).unwrap() && !otp_file_to_ignore(analysis, file_id) { if args.stats { add_stat(name.to_string()); @@ -227,6 +233,7 @@ pub fn eqwalize_app( cli: &mut dyn Cli, query_config: &BuckQueryConfig, ) -> Result<()> { + let start_time = SystemTime::now(); let config = DiscoverConfig::new(args.rebar, &args.profile); let mut loaded = load::load_project_at( cli, @@ -237,7 +244,10 @@ pub fn eqwalize_app( query_config, )?; build::compile_deps(&loaded, cli)?; - do_eqwalize_app(args, &mut loaded, cli) + telemetry::report_elapsed_time("eqwalize-app operational", start_time); + let r = do_eqwalize_app(args, &mut loaded, cli); + telemetry::report_elapsed_time("eqwalize-app done", start_time); + r } pub fn do_eqwalize_app( @@ -250,15 +260,13 @@ pub fn do_eqwalize_app( let module_index = analysis.module_index(loaded.project_id)?; let include_generated = args.include_generated; if include_generated { - write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?; + write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?; } let file_ids: Vec = module_index .iter_own() .filter_map(|(_name, _source, file_id)| { if analysis.file_app_name(file_id).ok()? 
== Some(AppName(args.app.clone())) - && analysis - .should_eqwalize(file_id, args.include_tests) - .unwrap() + && analysis.should_eqwalize(file_id).unwrap() && !otp_file_to_ignore(analysis, file_id) { Some(file_id) @@ -284,6 +292,7 @@ pub fn eqwalize_target( cli: &mut dyn Cli, query_config: &BuckQueryConfig, ) -> Result<()> { + let start_time = SystemTime::now(); let config = DiscoverConfig::buck(); let mut loaded = load::load_project_at( cli, @@ -295,6 +304,7 @@ pub fn eqwalize_target( )?; set_eqwalizer_config(&mut loaded); + telemetry::report_elapsed_time("eqwalize-target operational", start_time); let buck = match &loaded.project.project_build_data { ProjectBuildData::Buck(buck) => buck, @@ -307,7 +317,7 @@ pub fn eqwalize_target( let analysis = &loaded.analysis(); let include_generated = args.include_generated; if include_generated { - write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?; + write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?; } let mut file_ids: Vec = Default::default(); let mut at_least_one_found = false; @@ -324,9 +334,7 @@ pub fn eqwalize_target( let vfs_path = VfsPath::from(src.clone()); if let Some((file_id, _)) = loaded.vfs.file_id(&vfs_path) { at_least_one_found = true; - if analysis - .should_eqwalize(file_id, args.include_tests) - .unwrap() + if analysis.should_eqwalize(file_id).unwrap() && !otp_file_to_ignore(analysis, file_id) { file_ids.push(file_id); @@ -354,13 +362,15 @@ elp eqwalize-target erl/chatd #same as //erl/chatd/... but enables shell complet let mut reporter = reporting::PrettyReporter::new(analysis, &loaded, cli); let bail_on_error = args.bail_on_error; - eqwalize(EqwalizerInternalArgs { + let r = eqwalize(EqwalizerInternalArgs { analysis, loaded: &loaded, file_ids, reporter: &mut reporter, bail_on_error, - }) + }); + telemetry::report_elapsed_time("eqwalize-target done", start_time); + r } pub fn eqwalize_stats( @@ -382,7 +392,7 @@ pub fn eqwalize_stats( let module_index = analysis.module_index(loaded.project_id)?; let include_generated = args.include_generated; if include_generated { - write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?; + write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?; } let project_id = loaded.project_id; let pb = cli.progress(module_index.len_own() as u64, "Computing stats"); @@ -391,9 +401,7 @@ pub fn eqwalize_stats( .par_bridge() .progress_with(pb.clone()) .map_with(analysis.clone(), |analysis, (name, _source, file_id)| { - if analysis - .should_eqwalize(file_id, args.include_tests) - .expect("cancelled") + if analysis.should_eqwalize(file_id).expect("cancelled") && !otp_file_to_ignore(analysis, file_id) { analysis @@ -446,8 +454,7 @@ fn print_diagnostic_json( cli, "{}", serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| panic!( - "print_diagnostics_json failed for '{:?}': {}", - converted_diagnostic, err + "print_diagnostics_json failed for '{converted_diagnostic:?}': {err}" )) )?; Ok(()) @@ -466,8 +473,6 @@ fn eqwalize( bail!("No files to eqWAlize detected") } - pre_parse_for_speed(reporter, analysis.clone(), &file_ids); - let files_count = file_ids.len(); let pb = reporter.progress(files_count as u64, "EqWAlizing"); let output = loaded.with_eqwalizer_progress_bar(pb.clone(), move |analysis| { @@ -504,7 +509,7 @@ fn eqwalize( let file_id = analysis .module_index(loaded.project_id)? 
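// NOTE (usage recap, derived from the help text quoted below): eqwalize-target
// takes buck target patterns, e.g.
//
//     elp eqwalize-target //erl/chatd/...
//     elp eqwalize-target erl/chatd    # same as //erl/chatd/... but enables shell completion
//
// Each matched target's sources are looked up in the VFS and filtered through
// `should_eqwalize` (which, after this patch, no longer takes an
// `include_tests` flag) before being eqWAlized.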
.file_for_module(module.as_str()) - .with_context(|| format!("module {} not found", module))?; + .with_context(|| format!("module {module} not found"))?; reporter.write_eqwalizer_diagnostics(file_id, &diagnostics)?; if !diagnostics.is_empty() { has_errors = true; @@ -559,8 +564,7 @@ fn eqwalize( // The cached parse errors must be non-empty otherwise we wouldn't have `NoAst` assert!( !parse_diagnostics.is_empty(), - "Expecting erlang service diagnostics, but none found, for '{}'", - module + "Expecting erlang service diagnostics, but none found, for '{module}'" ); let parse_diagnostics: Vec<_> = parse_diagnostics .into_iter() @@ -587,17 +591,6 @@ fn eqwalize( } } -fn pre_parse_for_speed(reporter: &dyn Reporter, analysis: Analysis, file_ids: &[FileId]) { - let pb = reporter.progress(file_ids.len() as u64, "Parsing modules"); - file_ids - .par_iter() - .progress_with(pb.clone()) - .for_each_with(analysis, |analysis, &file_id| { - let _ = analysis.module_ast(file_id); - }); - pb.finish(); -} - fn set_eqwalizer_config(loaded: &mut LoadResult) { let config = EqwalizerConfig::default(); let db = loaded.analysis_host.raw_database_mut(); diff --git a/crates/elp/src/bin/erlang_service_cli.rs b/crates/elp/src/bin/erlang_service_cli.rs index 06e596018b..5b39a4aa06 100644 --- a/crates/elp/src/bin/erlang_service_cli.rs +++ b/crates/elp/src/bin/erlang_service_cli.rs @@ -11,6 +11,7 @@ use std::fs; use std::path::Path; use std::str; +use std::time::SystemTime; use anyhow::Context; use anyhow::Error; @@ -26,6 +27,7 @@ use elp_ide::Analysis; use elp_ide::elp_ide_db::elp_base_db::FileId; use elp_ide::elp_ide_db::elp_base_db::IncludeOtp; use elp_ide::erlang_service::DiagnosticLocation; +use elp_log::telemetry; use elp_log::timeit; use elp_project_model::AppType; use elp_project_model::DiscoverConfig; @@ -40,6 +42,7 @@ use crate::reporting::add_stat; use crate::reporting::dump_stats; pub fn parse_all(args: &ParseAll, cli: &mut dyn Cli, query_config: &BuckQueryConfig) -> Result<()> { + let start_time = SystemTime::now(); let config = DiscoverConfig::new(!args.buck, &args.profile); let loaded = load::load_project_at( cli, @@ -52,10 +55,15 @@ pub fn parse_all(args: &ParseAll, cli: &mut dyn Cli, query_config: &BuckQueryCon build::compile_deps(&loaded, cli)?; fs::create_dir_all(&args.to)?; + telemetry::report_elapsed_time("parse-all operational", start_time); + let parse_diagnostics = do_parse_all(cli, &loaded, &args.to, &args.module, args.buck)?; if args.stats { dump_stats(cli, args.list_modules); } + + telemetry::report_elapsed_time("parse-all done", start_time); + if !parse_diagnostics.is_empty() { writeln!( cli, @@ -129,7 +137,7 @@ pub fn do_parse_one( let result = db.module_ast(file_id)?; if result.is_ok() { if let Some((name, to)) = to { - let to_path = to.join(format!("{}.etf", name)); + let to_path = to.join(format!("{name}.etf")); fs::write(to_path, &*result.ast)?; } Ok(vec![]) @@ -142,14 +150,15 @@ pub fn do_parse_one( .chain(result.warnings.iter()) .map(|err| { let relative_path: &Path = err.path.strip_prefix(root_dir).unwrap_or(&err.path); - let (range, line_num) = match err.location { + let (range, line_num) = match &err.location { None => (None, convert::position(&line_index, 0.into()).line + 1), Some(DiagnosticLocation::Normal(range)) => ( Some(range), convert::position(&line_index, range.start()).line + 1, ), Some(DiagnosticLocation::Included { - directive_location, + file_attribute_location: directive_location, + error_path: _, error_location: _, }) => ( Some(directive_location), @@ -161,7 +170,7 @@ 
pub fn do_parse_one( relative_path: relative_path.to_owned(), line_num, msg: err.msg.to_owned(), - range, + range: range.copied(), } }) .collect(); diff --git a/crates/elp/src/bin/explain_cli.rs b/crates/elp/src/bin/explain_cli.rs index 5f00228aa3..feba2f77ba 100644 --- a/crates/elp/src/bin/explain_cli.rs +++ b/crates/elp/src/bin/explain_cli.rs @@ -15,11 +15,11 @@ use elp_ide::diagnostics::DiagnosticCode; use crate::args::Explain; pub fn explain(args: &Explain, cli: &mut dyn Cli) -> Result<()> { - if let Some(code) = DiagnosticCode::maybe_from_string(&args.code) { - if let Some(uri) = DiagnosticCode::as_uri(&code) { - let label = code.as_label(); - return Ok(writeln!(cli, "{uri} ({label})")?); - } + if let Some(code) = DiagnosticCode::maybe_from_string(&args.code) + && let Some(uri) = DiagnosticCode::as_uri(&code) + { + let label = code.as_label(); + return Ok(writeln!(cli, "{uri} ({label})")?); } Ok(writeln!(cli, "Unkwnown code: {}", args.code)?) } diff --git a/crates/elp/src/bin/glean.rs b/crates/elp/src/bin/glean.rs index 3eb58e0836..cb420261d2 100644 --- a/crates/elp/src/bin/glean.rs +++ b/crates/elp/src/bin/glean.rs @@ -8,9 +8,9 @@ * above-listed licenses. */ +use core::option::Option::None; use std::io::Write; use std::mem; -use std::path::Path; use anyhow::Result; use elp::build::load; @@ -24,6 +24,7 @@ use elp_ide::elp_ide_db::EqwalizerDatabase; use elp_ide::elp_ide_db::LineIndexDatabase; use elp_ide::elp_ide_db::RootDatabase; use elp_ide::elp_ide_db::docs::DocDatabase; +use elp_ide::elp_ide_db::docs::Documentation; use elp_ide::elp_ide_db::elp_base_db::FileId; use elp_ide::elp_ide_db::elp_base_db::IncludeOtp; use elp_ide::elp_ide_db::elp_base_db::ModuleName; @@ -51,9 +52,11 @@ use hir::DefineId; use hir::Expr; use hir::ExprId; use hir::ExprSource; +use hir::File; use hir::InFile; use hir::Literal; use hir::MacroName; +use hir::Module; use hir::Name; use hir::NameArity; use hir::PPDirective; @@ -81,7 +84,7 @@ const REC_ARITY: u32 = 99; const HEADER_ARITY: u32 = 100; const FACTS_FILE: &str = "facts.json"; -// @fb-only +// @fb-only: mod meta_only; #[derive(Serialize, Debug, Eq, Hash, PartialEq, Clone)] struct GleanFileId(u32); @@ -89,7 +92,6 @@ struct GleanFileId(u32); #[derive(Clone, Debug, Default)] struct IndexConfig { pub multi: bool, - pub prefix: Option, } impl From for FileId { @@ -143,6 +145,45 @@ impl FileLinesFact { } } +#[derive(Serialize, Debug)] +pub(crate) struct ModuleFact { + #[serde(rename = "file")] + file_id: GleanFileId, + name: String, + #[serde(skip_serializing_if = "Option::is_none")] + oncall: Option, + #[serde(skip_serializing_if = "Option::is_none")] + exports: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + behaviours: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + module_doc: Option, + #[serde(skip_serializing_if = "Option::is_none")] + exdoc_link: Option, +} + +impl ModuleFact { + fn new( + file_id: FileId, + name: String, + oncall: Option, + exports: Option>, + behaviours: Option>, + module_doc: Option, + exdoc_link: Option, + ) -> Self { + Self { + file_id: file_id.into(), + name, + oncall, + exports, + behaviours, + module_doc, + exdoc_link, + } + } +} + #[derive(Serialize, Debug)] pub(crate) struct FunctionDeclarationFact { #[serde(rename = "file")] @@ -237,6 +278,8 @@ pub(crate) enum Fact { XRef { facts: Vec> }, #[serde(rename = "erlang.DeclarationComment")] DeclarationComment { facts: Vec> }, + #[serde(rename = "erlang.Module")] + Module { facts: Vec> }, } #[derive(Serialize, Debug)] @@ -300,6 +343,10 @@ pub(crate) 
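// NOTE (illustrative, not part of this patch): given the serde attributes on
// ModuleFact above, a fact for a small module could serialize as (values
// invented for illustration):
//
//     {"file": 1234, "name": "chatd_sup",
//      "exports": ["start_link/0", "init/1"],
//      "behaviours": ["supervisor"]}
//
// The `None` options (here oncall, module_doc, and exdoc_link) are dropped
// entirely via `skip_serializing_if = "Option::is_none"`.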
struct MacroTarget { expansion: Option, #[serde(skip_serializing_if = "Option::is_none")] ods_url: Option, + #[serde(skip_serializing_if = "Option::is_none")] + logview_url: Option, + #[serde(skip_serializing_if = "Option::is_none")] + scuba_urls: Option>, } #[derive(Serialize, Debug)] @@ -314,6 +361,8 @@ pub(crate) struct RecordTarget { #[serde(rename = "file")] file_id: GleanFileId, name: String, + #[serde(skip_serializing_if = "Option::is_none")] + wam_url: Option, } #[derive(Serialize, Debug)] @@ -434,6 +483,7 @@ struct IndexedFacts { file_line_facts: Vec, declaration_facts: Vec, xref_facts: Vec, + module_facts: Vec, //v2 facts file_declarations: Vec, xref_v2: Vec, @@ -446,6 +496,7 @@ impl IndexedFacts { decl: FileDeclaration, xref: XRefFile, facts_v1: Option<(Vec, XRefFact)>, + module_fact: Option, ) -> Self { let mut facts = Self::default(); facts.file_facts.push(file_fact); @@ -456,6 +507,9 @@ impl IndexedFacts { facts.declaration_facts.extend(decl); facts.xref_facts.push(xref); } + if let Some(module_fact) = module_fact { + facts.module_facts.push(module_fact); + } facts } @@ -466,6 +520,7 @@ impl IndexedFacts { decl: FileDeclaration, xref: XRefFile, facts: Option<(Vec, XRefFact)>, + module_fact: Option, ) { self.file_facts.push(file_fact); self.file_line_facts.push(line_fact); @@ -475,6 +530,9 @@ impl IndexedFacts { self.declaration_facts.extend(decl); self.xref_facts.push(xref); } + if let Some(module_fact) = module_fact { + self.module_facts.push(module_fact); + } } fn into_v1_facts(mut self) -> Vec { @@ -680,6 +738,8 @@ impl IndexedFacts { }); } let xref_fact = xrefs.into_iter().map_into().collect(); + let module_facts = mem::take(&mut self.module_facts); + let module_facts = module_facts.into_iter().map_into().collect(); vec![ Fact::File { facts: mem::take(&mut self.file_facts), @@ -692,6 +752,9 @@ impl IndexedFacts { }, Fact::XRef { facts: xref_fact }, Fact::DeclarationComment { facts: comments }, + Fact::Module { + facts: module_facts, + }, ] } } @@ -704,10 +767,7 @@ pub struct GleanIndexer { pub fn index(args: &Glean, cli: &mut dyn Cli, query_config: &BuckQueryConfig) -> Result<()> { let (indexer, _loaded) = GleanIndexer::new(args, cli, query_config)?; - let config = IndexConfig { - multi: args.multi, - prefix: args.prefix.clone(), - }; + let config = IndexConfig { multi: args.multi }; let (facts, module_index) = indexer.index(config)?; write_results(facts, module_index, cli, args) } @@ -796,56 +856,42 @@ impl GleanIndexer { let source_root_id = db.file_source_root(file_id); let source_root = db.source_root(source_root_id); let path = source_root.path_for_file(&file_id).unwrap(); - match Self::index_file( - db, - file_id, - path, - project_id, - &module_index, - config.prefix.as_ref(), - ) { - Some((file, line, decl, xref, facts)) => { + match Self::index_file(db, file_id, path, project_id, &module_index) { + Some((file, line, decl, xref, facts, module_fact)) => { let mut result = FxHashMap::default(); result.insert( FACTS_FILE.to_string(), - IndexedFacts::new(file, line, decl, xref, facts), + IndexedFacts::new(file, line, decl, xref, facts, module_fact), ); result } - None => panic!("Can't find module {}", module), + None => panic!("Can't find module {module}"), } } else { let iter = files .into_par_iter() .map_with(self.analysis.clone(), |analysis, (file_id, path)| { analysis.with_db(|db| { - Self::index_file( - db, - file_id, - &path, - project_id, - &module_index, - config.prefix.as_ref(), - ) + Self::index_file(db, file_id, &path, project_id, &module_index) }) }) 
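// NOTE (context, not part of this patch): `IndexedFacts` now buffers the new
// `ModuleFact`s next to the v1/v2 facts: `new` and `add` take an optional
// fact per file, and the flush step emits them in one batch, roughly:
//
//     Fact::Module {
//         facts: module_facts, // at most one erlang.Module fact per indexed module
//     }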
.flatten() .flatten(); if config.multi { - iter.map(|(file, line, decl, xref, facts)| { - IndexedFacts::new(file, line, decl, xref, facts) + iter.map(|(file, line, decl, xref, facts, module_fact)| { + IndexedFacts::new(file, line, decl, xref, facts, module_fact) }) .collect::>() .into_iter() .enumerate() - .map(|(id, facts)| (format!("{}.json", id), facts)) + .map(|(id, facts)| (format!("{id}.json"), facts)) .collect() } else { let mut result = FxHashMap::default(); let facts = iter.collect::>().into_iter().fold( IndexedFacts::default(), - |mut acc, (file_fact, line_fact, decl, xref, facts)| { - acc.add(file_fact, line_fact, decl, xref, facts); + |mut acc, (file_fact, line_fact, decl, xref, facts, module_fact)| { + acc.add(file_fact, line_fact, decl, xref, facts, module_fact); acc }, ); @@ -862,13 +908,13 @@ impl GleanIndexer { let project_data = db.project_data(project_id); let mut files = vec![]; for &source_root_id in &project_data.source_roots { - if let Some(app_data) = db.app_data(source_root_id) { - if app_data.app_type == AppType::App { - let source_root = db.source_root(source_root_id); - for file_id in source_root.iter() { - if let Some(path) = source_root.path_for_file(&file_id) { - files.push((file_id, path.clone())); - } + if let Some(app_data) = db.app_data(source_root_id) + && app_data.app_type == AppType::App + { + let source_root = db.source_root(source_root_id); + for file_id in source_root.iter() { + if let Some(path) = source_root.path_for_file(&file_id) { + files.push((file_id, path.clone())); } } } @@ -883,15 +929,15 @@ impl GleanIndexer { path: &VfsPath, project_id: ProjectId, module_index: &FxHashMap, - prefix: Option<&String>, ) -> Option<( FileFact, FileLinesFact, FileDeclaration, XRefFile, Option<(Vec, XRefFact)>, + Option, )> { - let file_fact = Self::file_fact(db, file_id, path, project_id, prefix)?; + let file_fact = Self::file_fact(db, file_id, path, project_id)?; let line_fact = Self::line_fact(db, file_id); let mut xref_v2 = Self::xrefs_v2(db, file_id, module_index); let mut file_decl = Self::declarations_v2(db, file_id, path)?; @@ -901,9 +947,65 @@ impl GleanIndexer { if let Some(module) = elp_module_index.module_for_file(file_id) { let decl = Self::declarations_v1(db, file_id, module); let xref = Self::xrefs(db, file_id, module_index); - return Some((file_fact, line_fact, file_decl, xref_v2, Some((decl, xref)))); + let module_fact = Self::module_fact(db, file_id, module); + return Some(( + file_fact, + line_fact, + file_decl, + xref_v2, + Some((decl, xref)), + Some(module_fact), + )); } - Some((file_fact, line_fact, file_decl, xref_v2, None)) + Some((file_fact, line_fact, file_decl, xref_v2, None, None)) + } + + fn module_fact(db: &RootDatabase, file_id: FileId, module_name: &ModuleName) -> ModuleFact { + let module = Module { + file: File { file_id }, + }; + let name = module_name.to_string(); + + // Extract exported functions from def_map + let def_map = db.def_map_local(file_id); + + let mut exports = vec![]; + for (fun, def) in def_map.get_functions() { + if def.exported { + exports.push(format!("{}/{}", fun.name(), fun.arity())); + } + } + + // Extract oncall, behaviour, and moduledoc using form_list API + let sema = Semantic::new(db); + let form_list = sema.form_list(file_id); + + let oncall = sema.attribute_value_as_string(file_id, hir::known::oncall); + + let behaviours: Vec = form_list + .behaviour_attributes() + .map(|(_, behaviour)| behaviour.name.to_string()) + .collect(); + + let module_doc = 
sema.module_attribute(file_id).and_then(|module_attribute| { + let docs = Documentation::new(db, &sema); + docs.to_doc(InFile::new(file_id, &module_attribute)) + .map(|doc| doc.markdown_text().to_string()) + .filter(|text| !text.is_empty()) + }); + + // @fb-only: let exdoc_link = elp_ide::meta_only::exdoc_links::module_exdoc_link(&module, &sema); + let exdoc_link: Option = None; // @oss-only + + ModuleFact::new( + file_id, + name, + oncall, + (!exports.is_empty()).then_some(exports), + (!behaviours.is_empty()).then_some(behaviours), + module_doc, + exdoc_link, + ) } fn add_xref_based_declarations( @@ -927,13 +1029,44 @@ impl GleanIndexer { if let Some(def) = def { let range = def.source(db).syntax().text_range(); let text = &db.file_text(id)[range]; - let text = format!("```erlang\n{}\n```", text); - let doc = match (&x.key.expansion, &x.key.ods_url) { - (None, None) => text, - (None, Some(o)) => format!("[ODS]({})\n{}", o, text), - (Some(e), None) => format!("{}\n---\n\n{}", text, e), - (Some(e), Some(o)) => format!("[ODS]({})\n{}\n---\n\n{}", o, text, e), + let text = format!("```erlang\n{text}\n```"); + let scuba_links = + x.key + .scuba_urls + .as_ref() + .map_or(String::new(), |scuba_urls| { + scuba_urls + .iter() + .map(|(display_name, url)| { + format!("[{}]({})", display_name, url) + }) + .collect::>() + .join(" | ") + }); + + let scuba_section = if !scuba_links.is_empty() { + format!("Scuba: {}\n", scuba_links) + } else { + String::new() }; + + let doc = format!( + "{}{}{}{}{}", + x.key + .ods_url + .as_ref() + .map_or(String::new(), |o| format!("[ODS]({})\n", o)), + x.key + .logview_url + .as_ref() + .map_or(String::new(), |l| format!("[LogView]({})\n", l)), + scuba_section, + text, + x.key + .expansion + .as_ref() + .map_or(String::new(), |e| format!("\n---\n\n{}", e)) + ); let decl = Declaration::MacroDeclaration( MacroDecl { name: x.key.name.clone(), @@ -956,6 +1089,38 @@ impl GleanIndexer { x.key.arity = Some(xref.source.start); } } + XRefTarget::Record(x) => { + // Add WAM documentation for records that have wam_url + if let Some(wam_url) = &x.key.wam_url { + let id: FileId = x.key.file_id.clone().into(); + let def_map = db.def_map(id); + let record_name = Name::from_erlang_service(&x.key.name); + if let Some(def) = def_map.get_record(&record_name) { + let range = def.source(db).syntax().text_range(); + let text = &db.file_text(id)[range]; + let text = format!("```erlang\n{text}\n```"); + let doc = format!("[WAM]({})\n{}", wam_url, text); + let decl = Declaration::RecordDeclaration( + RecordDecl { + name: x.key.name.clone(), + span: xref.source.clone(), + } + .into(), + ); + let doc_decl = Declaration::DocDeclaration( + DocDecl { + target: Box::new(decl.clone()), + span: xref.source.clone(), + text: doc, + } + .into(), + ); + file_decl.declarations.push(decl); + file_decl.declarations.push(doc_decl); + x.key.file_id = file_id.into(); + } + } + } _ => (), } } @@ -988,16 +1153,12 @@ impl GleanIndexer { file_id: FileId, path: &VfsPath, project_id: ProjectId, - prefix: Option<&String>, ) -> Option { let project_data = db.project_data(project_id); let root = project_data.root_dir.as_path(); let file_path = path.as_path()?; let file_path = file_path.strip_prefix(root)?; - let file_path = match prefix { - Some(prefix) => Path::new(&prefix).join(file_path).to_str()?.into(), - None => file_path.as_str().to_string(), - }; + let file_path = file_path.as_str().to_string(); Some(FileFact::new(file_id, file_path)) } @@ -1113,7 +1274,7 @@ impl GleanIndexer { for (ty, def) in 
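// NOTE: `module_fact` above collapses empty collections to None with
// `(!exports.is_empty()).then_some(exports)`, so optional fact fields are
// omitted instead of appearing as empty lists. A sketch of the pattern
// (the `non_empty` helper is hypothetical, not part of the change):
fn non_empty<T>(v: Vec<T>) -> Option<Vec<T>> {
    (!v.is_empty()).then_some(v)
}

fn non_empty_demo() {
    assert_eq!(non_empty::<&str>(vec![]), None);
    assert_eq!(non_empty(vec!["init/1"]), Some(vec!["init/1"]));
}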
def_map.get_types() { let range = def.source(db).syntax().text_range(); let text = &db.file_text(file_id)[range]; - let text = format!("```erlang\n{}\n```", text); + let text = format!("```erlang\n{text}\n```"); let span: Location = range.into(); let decl = Declaration::TypeDeclaration( @@ -1140,7 +1301,7 @@ impl GleanIndexer { for (rec, def) in def_map.get_records() { let range = def.source(db).syntax().text_range(); let text = &db.file_text(file_id)[range]; - let text = format!("```erlang\n{}\n```", text); + let text = format!("```erlang\n{text}\n```"); let span: Location = range.into(); let decl = Declaration::RecordDeclaration( @@ -1165,7 +1326,7 @@ impl GleanIndexer { if let Some((name, Some("hrl"))) = path.name_and_extension() { declarations.push(Declaration::HeaderDeclaration( HeaderDecl { - name: format!("{}.hrl", name), + name: format!("{name}.hrl"), span: Location { start: 0, length: 1, @@ -1198,54 +1359,52 @@ impl GleanIndexer { vec![], &mut |mut acc, ctx| match &ctx.item { hir::AnyExpr::Expr(Expr::Call { target, args }) => { - if let Some((body, _, expr_source)) = ctx.body_with_expr_source(&sema) { - if let Some(range) = + if let Some((body, _, expr_source)) = ctx.body_with_expr_source(&sema) + && let Some(range) = Self::find_range(&sema, &ctx, &source_file, &expr_source) + { + let arity = args.len() as u32; + if let Some(fact) = + Self::resolve_call(&sema, target, arity, file_id, &body, range) { - let arity = args.len() as u32; - if let Some(fact) = - Self::resolve_call(&sema, target, arity, file_id, &body, range) - { - acc.push(fact); - } + acc.push(fact); } } acc } hir::AnyExpr::Expr(Expr::CaptureFun { target, arity }) => { - if let Some((body, range)) = ctx.find_range(&sema) { - if range.file_id == file_id { - let arity: Option = match &body[*arity] { - Expr::Literal(Literal::Integer(int)) => int.value.try_into().ok(), - _ => None, - }; - if let Some(arity) = arity { - if let Some(fact) = Self::resolve_call( - &sema, - target, - arity, - file_id, - &body, - range.range, - ) { - acc.push(fact); - } - } + if let Some((body, range)) = ctx.find_range(&sema) + && range.file_id == file_id + { + let arity: Option = match &body[*arity] { + Expr::Literal(Literal::Integer(int)) => int.value.try_into().ok(), + _ => None, + }; + if let Some(arity) = arity + && let Some(fact) = Self::resolve_call( + &sema, + target, + arity, + file_id, + &body, + range.range, + ) + { + acc.push(fact); } } acc } hir::AnyExpr::TypeExpr(TypeExpr::Call { target, args }) => { - if let Some((body, _, expr_source)) = ctx.body_with_expr_source(&sema) { - if let Some(range) = + if let Some((body, _, expr_source)) = ctx.body_with_expr_source(&sema) + && let Some(range) = Self::find_range(&sema, &ctx, &source_file, &expr_source) + { + let arity = args.len() as u32; + if let Some(fact) = + Self::resolve_type(&sema, target, arity, file_id, &body, range) { - let arity = args.len() as u32; - if let Some(fact) = - Self::resolve_type(&sema, target, arity, file_id, &body, range) - { - acc.push(fact); - } + acc.push(fact); } } acc @@ -1373,7 +1532,7 @@ impl GleanIndexer { }) => { let def = macro_def.as_ref()?; let mut resolved = Self::resolve_macro_v2(sema, def, source_file, ctx)?; - // @fb-only + // @fb-only: meta_only::resolve_macro_expansion(sema, *expansion, ctx, &mut resolved); Some(resolved) } hir::AnyExpr::Pat(Pat::MacroCall { macro_def, .. 
}) @@ -1401,7 +1560,7 @@ impl GleanIndexer { vars: FxHashMap<&Location, &String>, ) -> Vec { let mut result = vec![]; - if !db.is_eqwalizer_enabled(file_id, false) { + if !db.is_eqwalizer_enabled(file_id) { return result; } let module_diagnostics = db.eqwalizer_diagnostics_by_project(project_id, vec![file_id]); @@ -1416,7 +1575,7 @@ impl GleanIndexer { let range: TextRange = range.clone().into(); let range: Location = range.into(); if let Some(name) = vars.get(&range) { - let text = format!("```erlang\n{} :: {}\n```", name, ty); + let text = format!("```erlang\n{name} :: {ty}\n```"); let decl = VarDecl { name: name.to_string(), doc: text, @@ -1458,20 +1617,19 @@ impl GleanIndexer { let include = &form_list[*idx]; let ast = include.form_id().get_ast(db, file_id); let range = ast.syntax().text_range().into(); - if let Some(file) = db.resolve_include(InFile::new(file_id, *idx)) { - if let Some(path) = path_for_file(db, file) { - if let Some((name, Some("hrl"))) = path.name_and_extension() { - let target = HeaderTarget { - file_id: file.into(), - name: format!("{}.hrl", name), - }; - let xref = XRef { - source: range, - target: XRefTarget::Header(target.into()), - }; - acc.push(xref); - } - } + if let Some(file) = db.resolve_include(InFile::new(file_id, *idx)) + && let Some(path) = path_for_file(db, file) + && let Some((name, Some("hrl"))) = path.name_and_extension() + { + let target = HeaderTarget { + file_id: file.into(), + name: format!("{name}.hrl"), + }; + let xref = XRef { + source: range, + target: XRefTarget::Header(target.into()), + }; + acc.push(xref); } } acc @@ -1480,20 +1638,20 @@ impl GleanIndexer { let export = &form_list[idx]; let ast = export.form_id.get_ast(db, file_id); for fun in ast.funs() { - if let Some(na) = Self::fa_name_arity(&fun) { - if let Some(def) = sema.def_map(file_id).get_function(&na) { - let range = fun.syntax().text_range().into(); - let target = FunctionTarget { - file_id: def.file.file_id.into(), - name: na.name().to_string(), - arity: na.arity(), - }; - let xref = XRef { - source: range, - target: XRefTarget::Function(target.into()), - }; - acc.push(xref); - } + if let Some(na) = Self::fa_name_arity(&fun) + && let Some(def) = sema.def_map(file_id).get_function(&na) + { + let range = fun.syntax().text_range().into(); + let target = FunctionTarget { + file_id: def.file.file_id.into(), + name: na.name().to_string(), + arity: na.arity(), + }; + let xref = XRef { + source: range, + target: XRefTarget::Function(target.into()), + }; + acc.push(xref); } } acc @@ -1622,6 +1780,8 @@ impl GleanIndexer { arity: define.name.arity(), expansion, ods_url: None, + logview_url: None, + scuba_urls: None, }; Some(XRef { source: range.into(), @@ -1638,7 +1798,7 @@ impl GleanIndexer { if let ast::Expr::ExprMax(ExprMax::MacroCallExpr(macro_call)) = node { let (_, expansion) = sema.expand(InFile::new(source_file.file_id, ¯o_call))?; let expansion = expansion.trim(); - let expansion = format!("```erlang\n{}\n```", expansion); + let expansion = format!("```erlang\n{expansion}\n```"); return Some(expansion); } None @@ -1714,12 +1874,19 @@ impl GleanIndexer { let (_, _, expr_source) = ctx.body_with_expr_source(sema)?; let source_file = sema.parse(file_id); let range = Self::find_range(sema, ctx, &source_file, &expr_source)?; + + // @fb-only: use elp_ide::meta_only::wam_links; + // @fb-only: let wam_ctx = wam_links::WamEventCtx::new(sema.db.upcast()); + // @fb-only: let wam_url = wam_ctx.build_wam_link(name).map(|link| link.url()); + let wam_url = None; // @oss-only + Some(XRef { 
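// NOTE: the nested `if let` towers in this file are being flattened into
// let-chains (`if let ... && cond && let ...`), which need a toolchain where
// let-chains are stable. A minimal before/after sketch (names illustrative):
fn sum_if_positive(a: Option<i32>, b: Option<i32>) -> Option<i32> {
    // Before: if let Some(x) = a { if x > 0 { if let Some(y) = b { ... } } }
    if let Some(x) = a
        && x > 0
        && let Some(y) = b
    {
        return Some(x + y);
    }
    None
}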
source: range.into(), target: XRefTarget::Record( RecordTarget { file_id: def.file.file_id.into(), name: def.record.name.to_string(), + wam_url, } .into(), ), @@ -1766,7 +1933,7 @@ impl GleanIndexer { fn path_to_module_name(path: &VfsPath) -> Option { match path.name_and_extension() { Some((name, Some("erl"))) => Some(name.to_string()), - Some((name, Some("hrl"))) => Some(format!("{}.hrl", name)), + Some((name, Some("hrl"))) => Some(format!("{name}.hrl")), _ => None, } } @@ -1804,22 +1971,22 @@ mod tests { start: 0, length: 10, }; - let module = "smax_product_catalog"; - let name = "product_visibility_update_request_iq"; + let module_name = "test_module"; + let func_name = "test_function"; let arity = 0; - let file_facts = vec![ - FileFact::new( - file_id, - "/local/whatsapp/server/erl/groupd_service/test/p13n/grpd_p13n_new_create_group_SUITE.erl".into(), - ) - ]; + let file_facts = vec![FileFact::new( + file_id, + "/test/app/src/test_module.erl".into(), + )]; + let file_line_facts = vec![FileLinesFact::new(file_id, vec![71, 42], true)]; + let decl = FileDeclaration { file_id: file_id.into(), declarations: vec![Declaration::FunctionDeclaration( FuncDecl { - name: name.to_string(), + name: func_name.to_string(), arity, span: location.clone(), exported: false, @@ -1828,6 +1995,7 @@ mod tests { .into(), )], }; + let xref = XRefFile { file_id: file_id.into(), xrefs: vec![XRef { @@ -1835,7 +2003,7 @@ mod tests { target: XRefTarget::Function( FunctionTarget { file_id: file_id.into(), - name: name.to_string(), + name: func_name.to_string(), arity, } .into(), @@ -1843,11 +2011,22 @@ mod tests { }], }; + let module = ModuleFact { + file_id: file_id.into(), + name: module_name.to_string(), + oncall: Some("test_team".to_string()), + exports: Some(vec![format!("{func_name}/{arity}")]), + behaviours: Some(vec!["test_behaviour".to_string()]), + module_doc: Some("Test module documentation".to_string()), + exdoc_link: Some("https://example.com/docs/test_module.html".to_string()), + }; + let facts = IndexedFacts { file_facts, file_line_facts, declaration_facts: vec![], xref_facts: vec![], + module_facts: vec![module], file_declarations: vec![decl], xref_v2: vec![xref], }; @@ -1860,16 +2039,14 @@ mod tests { v2: true, pretty: false, multi: false, - prefix: None, }; let mut module_index = FxHashMap::default(); - module_index.insert(file_id.into(), module.to_string()); + module_index.insert(file_id.into(), module_name.to_string()); write_results(map, module_index, &mut cli, &args).expect("success"); - let (out, err) = cli.to_strings(); let expected = expect_file!["../resources/test/glean/serialization_test.out"]; - expected.assert_eq(&out); + assert_eq!(expected.data().trim(), &out); assert_eq!(err, "") } @@ -1888,25 +2065,6 @@ mod tests { ); } - #[test] - fn file_fact_prefix_test() { - let spec = r#" - //- /glean/app_glean/src/glean_module2.erl - -module(glean_module2). - "#; - let config = IndexConfig { - multi: false, - prefix: Some("my/prefix".to_string()), - }; - let result = facts_with_annotations_with_config(spec, config).0; - assert_eq!(result.file_facts.len(), 1); - let file_fact = &result.file_facts[0]; - assert_eq!( - file_fact.file_path.as_str(), - "my/prefix/glean/app_glean/src/glean_module2.erl" - ); - } - #[test] fn line_fact_with_new_line_test() { let spec = r#" @@ -2177,10 +2335,10 @@ mod tests { fn xref_types_test() { let spec = r#" //- /glean/app_glean/src/glean_module81.erl - -type small() :: #{non_neg_integer() | infinity}. + -type small() :: {non_neg_integer() | infinity}. 
//- /glean/app_glean/src/glean_module8.erl - -type huuuge() :: #{non_neg_integer() | infinity}. + -type huuuge() :: {non_neg_integer() | infinity}. -spec baz( A :: huuuge(), %% ^^^^^^ glean_module8/huuuge/0 @@ -2235,10 +2393,10 @@ mod tests { fn xref_types_v2_test() { let spec = r#" //- /glean/app_glean/src/glean_module81.erl - -type small() :: #{non_neg_integer() | infinity}. + -type small() :: {non_neg_integer() | infinity}. //- /glean/app_glean/src/glean_module8.erl - -type huuuge() :: #{non_neg_integer() | infinity}. + -type huuuge() :: {non_neg_integer() | infinity}. -spec baz( A :: huuuge(), %% ^^^^^^ glean_module8.erl/type/huuuge/0 @@ -2277,7 +2435,7 @@ mod tests { }). baz(A) -> #query{ size = A }. - %% ^^^^^^ glean_module9.erl/rec/query + %% ^^^^^^ glean_module9.erl/rec/query/no_wam "#; xref_v2_check(spec); @@ -2307,9 +2465,9 @@ mod tests { -record(stats, {count, time}). baz(Time) -> [{#stats.count, 1}, - %% ^^^^^^ glean_module10.erl/rec/stats + %% ^^^^^^ glean_module10.erl/rec/stats/no_wam {#stats.time, Time}]. - %% ^^^^^^ glean_module10.erl/rec/stats + %% ^^^^^^ glean_module10.erl/rec/stats/no_wam "#; @@ -2338,7 +2496,7 @@ mod tests { -record(stats, {count, time}). baz(Stats) -> Stats#stats.count. - %% ^^^^^^ glean_module11.erl/rec/stats + %% ^^^^^^ glean_module11.erl/rec/stats/no_wam "#; xref_v2_check(spec); @@ -2366,7 +2524,7 @@ mod tests { -record(stats, {count, time}). baz(Stats, NewCnt) -> Stats#stats{count = NewCnt}. - %% ^^^^^^ glean_module12.erl/rec/stats + %% ^^^^^^ glean_module12.erl/rec/stats/no_wam "#; xref_v2_check(spec); @@ -2396,7 +2554,7 @@ mod tests { -record(stats, {count, time}). baz(Stats) -> #stats{count = Count, time = Time} = Stats. - %% ^^^^^^ glean_module13.erl/rec/stats + %% ^^^^^^ glean_module13.erl/rec/stats/no_wam "#; xref_v2_check(spec); @@ -2420,7 +2578,7 @@ mod tests { //- /glean/app_glean/src/glean_module14.erl -record(rec, {field}). foo(#rec.field) -> ok. - %% ^^^^ glean_module14.erl/rec/rec + %% ^^^^ glean_module14.erl/rec/rec/no_wam "#; xref_v2_check(spec); @@ -2446,10 +2604,10 @@ mod tests { //- /glean/app_glean/src/glean_module15.erl -record(stats, {count, time}). -spec baz() -> #stats{}. - %% ^^^^^^ glean_module15.erl/rec/stats + %% ^^^^^^ glean_module15.erl/rec/stats/no_wam baz() -> #stats{count = 1, time = 2}. - %% ^^^^^^ glean_module15.erl/rec/stats + %% ^^^^^^ glean_module15.erl/rec/stats/no_wam "#; xref_v2_check(spec); @@ -2470,25 +2628,12 @@ mod tests { baz(1) -> ?TAU; %% ^^^ macro.erl/macro/TAU/117/no_ods/6.28 baz(N) -> ?MAX(N, 200). - %% ^^^ macro.erl/macro/MAX/137/no_ods/if (N > 200) -> N; 'true' -> 200 end + %% ^^^ macro.erl/macro/MAX/137/no_ods/if (N > 200) -> N; true -> 200 end "#; xref_v2_check(spec); } - #[test] - fn xref_macro_ods_v2_test() { - let spec = r#" - //- /src/macro.erl - -module(macro). - -define(COUNT_INFRA(X), wa_stats_counter:count(X)). - baz(atom) -> ?COUNT_INFRA(atom), - %% ^^^^^^^^^^^ macro.erl/macro/COUNT_INFRA/94/has_ods/'wa_stats_counter':'count'( 'atom' ) - - "#; - // @fb-only - } - #[test] fn xref_macro_in_pat_v2_test() { let spec = r#" @@ -2511,7 +2656,7 @@ mod tests { -define(TYPE, integer()). -spec baz(ok) -> ?TYPE. - %% ^^^^ macro.erl/macro/TYPE/73/no_ods/'erlang':'integer'() + %% ^^^^ macro.erl/macro/TYPE/73/no_ods/erlang:integer() baz(ok) -> 1. "#; @@ -2525,14 +2670,114 @@ mod tests { -module(macro). -define(FOO(X), X). -wild(?FOO(atom)). 
- %% ^^^ macro.erl/macro/FOO/53/no_ods/'atom' + %% ^^^ macro.erl/macro/FOO/53/no_ods/atom "#; xref_v2_check(spec); } + #[test] + fn module_fact_test() { + let spec = r#" + //- /src/sample_worker.erl + %%% This is a module documentation + %%% It explains what this module does + -module(sample_worker). + -oncall("platform_team"). + -behaviour(test_behaviour). + -export([init/1, handle_task/2]). + + init(Args) -> {ok, Args}. + handle_task(Task, State) -> {reply, ok, State}. + internal_helper(X) -> X + 1. + "#; + let (facts, _, _, _, _) = facts_with_annotations(spec); + assert_eq!(facts.module_facts.len(), 1); + let module_fact = &facts.module_facts[0]; + assert_eq!(module_fact.name, "sample_worker"); + assert_eq!(module_fact.oncall, Some("platform_team".to_string())); + assert_eq!( + module_fact.behaviours, + Some(vec!["test_behaviour".to_string()]) + ); + assert_eq!(module_fact.exports.as_ref().map(|v| v.len()), Some(2)); + for expected in ["handle_task/2", "init/1"] { + assert!( + module_fact + .exports + .as_ref() + .unwrap() + .contains(&expected.to_string()) + ); + } + assert!(module_fact.module_doc.is_none()); + } + + #[test] + fn module_fact_multiple_behaviours_test() { + let spec = r#" + //- /src/factory_service.erl + -module(factory_service). + -oncall("manufacturing_team"). + -behaviour(supervisor). + -behaviour(test_behaviour1). + -behaviour(test_behaviour2). + + start_supervision() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). + create_product(Type, Config) -> test_behaviour1:produce(Type, Config). + manage_assembly(Parts) -> test_behaviour2:assemble(Parts). + "#; + let (facts, _, _, _, _) = facts_with_annotations(spec); + assert_eq!(facts.module_facts.len(), 1); + let module_fact = &facts.module_facts[0]; + assert_eq!(module_fact.name, "factory_service"); + assert_eq!(module_fact.oncall, Some("manufacturing_team".to_string())); + assert_eq!(module_fact.behaviours.as_ref().map(|v| v.len()), Some(3)); + for expected in ["test_behaviour1", "test_behaviour2", "supervisor"] { + assert!( + module_fact + .behaviours + .as_ref() + .unwrap() + .contains(&expected.to_string()) + ); + } + assert!(module_fact.exports.is_none()); + assert!(module_fact.module_doc.is_none()); + } + + #[test] + fn module_fact_no_oncall_test() { + let spec = r#" + //- /src/utility_helper.erl + -module(utility_helper). + -behaviour(supervisor). + -export([start_link/0, add_child/2]). + + start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). + add_child(ChildSpec, Opts) -> supervisor:start_child(?MODULE, {ChildSpec, Opts}). 
+ "#; + let (facts, _, _, _, _) = facts_with_annotations(spec); + assert_eq!(facts.module_facts.len(), 1); + let module_fact = &facts.module_facts[0]; + assert_eq!(module_fact.name, "utility_helper"); + assert_eq!(module_fact.oncall, None); + assert_eq!(module_fact.behaviours, Some(vec!["supervisor".to_string()])); + assert_eq!(module_fact.exports.as_ref().map(|v| v.len()), Some(2)); + for expected in ["add_child/2", "start_link/0"] { + assert!( + module_fact + .exports + .as_ref() + .unwrap() + .contains(&expected.to_string()) + ); + } + assert!(module_fact.module_doc.is_none()); + } + #[allow(clippy::type_complexity)] - fn facts_with_annotations( + pub(crate) fn facts_with_annotations( spec: &str, ) -> ( IndexedFacts, @@ -2626,7 +2871,7 @@ mod tests { } #[track_caller] - fn xref_v2_check(spec: &str) { + pub(crate) fn xref_v2_check(spec: &str) { let (facts, mut expected_by_file, file_names, _d, _) = facts_with_annotations(spec); for xref_fact in facts.xref_v2 { let file_id = xref_fact.file_id; @@ -2642,7 +2887,7 @@ mod tests { if label.is_empty() { continue; } - let label = format!("{}/{}", file_name, label); + let label = format!("{file_name}/{label}"); let tuple = (range, label); let idx = annotations .iter() @@ -2791,7 +3036,7 @@ mod tests { .strip_suffix("\n```") .unwrap() .to_string(); - f.write_str(format!("var/{}", ttype).as_str()) + f.write_str(format!("var/{ttype}").as_str()) } Declaration::HeaderDeclaration(decl) => { f.write_str(format!("header/{}", decl.key.name).as_str()) @@ -2821,9 +3066,15 @@ mod tests { Some(arity) => arity.to_string(), None => "no_arity".to_string(), }; - let ods_link = match &xref.key.ods_url { - Some(_) => "has_ods", - None => "no_ods", + let ods_link = match ( + &xref.key.ods_url, + &xref.key.logview_url, + &xref.key.scuba_urls, + ) { + (Some(_), _, _) => "has_ods", + (None, Some(_), _) => "has_logview", + (None, None, Some(_)) => "has_scuba", + (None, None, None) => "no_ods", }; let exp = match &xref.key.expansion { Some(exp) => exp @@ -2841,7 +3092,13 @@ mod tests { ) } XRefTarget::Header(_) => f.write_str("header"), - XRefTarget::Record(xref) => f.write_str(format!("rec/{}", xref.key.name).as_str()), + XRefTarget::Record(xref) => { + let wam_link = match &xref.key.wam_url { + Some(_) => "has_wam", + None => "no_wam", + }; + f.write_str(format!("rec/{}/{}", xref.key.name, wam_link).as_str()) + } XRefTarget::Type(xref) => { f.write_str(format!("type/{}/{}", xref.key.name, xref.key.arity).as_str()) } diff --git a/crates/elp/src/bin/lint_cli.rs b/crates/elp/src/bin/lint_cli.rs index f32de37074..241a3d4481 100644 --- a/crates/elp/src/bin/lint_cli.rs +++ b/crates/elp/src/bin/lint_cli.rs @@ -13,16 +13,19 @@ use std::fs; use std::fs::File; use std::io::Write; use std::path::Path; -use std::path::PathBuf; use std::str; use std::sync::Arc; +use std::thread; +use std::time::SystemTime; use anyhow::Result; use anyhow::bail; +use crossbeam_channel::unbounded; use elp::build::load; use elp::build::types::LoadResult; use elp::cli::Cli; use elp::convert; +use elp::memory_usage::MemoryUsage; use elp::otp_file_to_ignore; use elp::read_lint_config_file; use elp_eqwalizer::Mode; @@ -50,16 +53,16 @@ use elp_ide::elp_ide_db::elp_base_db::ProjectId; use elp_ide::elp_ide_db::elp_base_db::Vfs; use elp_ide::elp_ide_db::elp_base_db::VfsPath; use elp_ide::elp_ide_db::source_change::SourceChange; +use elp_ide_db::text_edit::TextSize; +use elp_log::telemetry; use elp_project_model::AppName; use elp_project_model::AppType; use elp_project_model::DiscoverConfig; use 
elp_project_model::buck::BuckQueryConfig; -use elp_text_edit::TextSize; use fxhash::FxHashMap; use fxhash::FxHashSet; use hir::FormIdx; use hir::InFile; -use indicatif::ParallelProgressIterator; use itertools::Itertools; use paths::Utf8PathBuf; use rayon::prelude::ParallelBridge; @@ -67,12 +70,16 @@ use rayon::prelude::ParallelIterator; use crate::args::Lint; use crate::reporting; +use crate::reporting::print_memory_usage; pub fn run_lint_command( args: &Lint, cli: &mut dyn Cli, query_config: &BuckQueryConfig, ) -> Result<()> { + let start_time = SystemTime::now(); + let memory_start = MemoryUsage::now(); + if let Some(to) = &args.to { fs::create_dir_all(to)? }; @@ -82,8 +89,22 @@ pub fn run_lint_command( // We load the project after loading config, in case it bails with // errors. No point wasting time if the config is wrong. let mut loaded = load_project(args, cli, query_config)?; + telemetry::report_elapsed_time("lint operational", start_time); - do_codemod(cli, &mut loaded, &diagnostics_config, args) + let result = do_codemod(cli, &mut loaded, &diagnostics_config, args); + + telemetry::report_elapsed_time("lint done", start_time); + + let memory_end = MemoryUsage::now(); + let memory_used = memory_end - memory_start; + + // Print memory usage at the end if requested and format is normal + if args.is_format_normal() && args.report_system_stats { + print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?; + writeln!(cli, "{}", memory_used)?; + } + + result } fn get_and_report_diagnostics_config(args: &Lint, cli: &mut dyn Cli) -> Result { @@ -94,7 +115,7 @@ fn get_and_report_diagnostics_config(args: &Lint, cli: &mut dyn Cli) -> Result Result> { + loaded: &LoadResult, + module: &Option, +) -> Result<(Vec<(String, FileId, DiagnosticCollection)>, bool, bool)> { let module_index = analysis.module_index(*project_id).unwrap(); - let module_iter = module_index.iter_own(); let ignored_apps: FxHashSet>> = args .ignore_apps .iter() .map(|name| Some(Some(AppName(name.to_string())))) .collect(); - let pb = cli.progress(module_iter.len() as u64, "Parsing modules"); let app_name = args.app.as_ref().map(|name| AppName(name.to_string())); - Ok(module_iter - .par_bridge() - .progress_with(pb) - .map_with( - analysis.clone(), - |db, (module_name, _file_source, file_id)| { - if !otp_file_to_ignore(db, file_id) - && db.file_app_type(file_id).ok() != Some(Some(AppType::Dep)) - && !ignored_apps.contains(&db.file_app_name(file_id).ok()) - && (app_name.is_none() - || db.file_app_name(file_id).ok().as_ref() == Some(&app_name)) - { - do_parse_one(db, config, file_id, module_name.as_str(), args).unwrap() - } else { - None - } - }, - ) - .flatten() - .collect()) + // Create a channel for streaming results + let (tx, rx) = unbounded(); + + // Collect modules into an owned vector + let modules: Vec<_> = module_index + .iter_own() + .map(|(name, source, file_id)| (name.as_str().to_string(), source, file_id)) + .collect(); + + let analysis_clone = analysis.clone(); + let config_clone = config.clone(); + let args_clone = args.clone(); + + let join_handle = thread::spawn(move || { + modules + .into_iter() + .par_bridge() + .map_with( + (analysis_clone, tx), + |(db, tx), (module_name, _file_source, file_id)| { + if !otp_file_to_ignore(db, file_id) + && db.file_app_type(file_id).ok() != Some(Some(AppType::Dep)) + && !ignored_apps.contains(&db.file_app_name(file_id).ok()) + && (app_name.is_none() + || db.file_app_name(file_id).ok().as_ref() == Some(&app_name)) + && let Ok(Some(result)) = do_diagnostics_one( + db, + 
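// NOTE: `run_lint_command` above samples memory before and after the run and
// prints `memory_end - memory_start` when --report-system-stats is set. A
// sketch of that delta-measurement shape (`MemSample` is a stand-in; the real
// `MemoryUsage` type lives in the elp crate):
use std::ops::Sub;

#[derive(Clone, Copy, Debug)]
struct MemSample {
    allocated_bytes: isize,
}

impl Sub for MemSample {
    type Output = MemSample;
    fn sub(self, rhs: MemSample) -> MemSample {
        MemSample {
            allocated_bytes: self.allocated_bytes - rhs.allocated_bytes,
        }
    }
}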
&config_clone, + file_id, + &module_name, + &args_clone, + ) + { + // Send result through channel + let _ = tx.send(result); + } + }, + ) + .for_each(|_| {}); // Consume the iterator + }); + + // Collect results as they arrive from the channel + let mut results = Vec::new(); + let mut err_in_diag = false; + let mut module_count = 0; + let mut any_diagnostics_printed = false; + + for result in rx { + let printed = if args.skip_stream_print() { + false + } else { + print_diagnostic_result( + cli, + analysis, + config, + args, + loaded, + module, + &mut err_in_diag, + &mut module_count, + &result, + )? + }; + any_diagnostics_printed = any_diagnostics_printed || printed; + results.push(result); + } + + // Wait for the thread to complete before returning + // This ensures that analysis_clone is dropped and its read lock is released + join_handle + .join() + .expect("Failed to join diagnostics thread"); + + Ok((results, err_in_diag, any_diagnostics_printed)) } -fn do_parse_one( +fn do_diagnostics_one( db: &Analysis, config: &DiagnosticsConfig, file_id: FileId, @@ -218,6 +293,8 @@ pub fn do_codemod( ) -> Result<()> { // Declare outside the block so it has the right lifetime for filter_diagnostics let res; + let streamed_err_in_diag; + let mut any_diagnostics_printed = false; let mut initial_diags = { // We put this in its own block so that analysis is // freed before we apply lints. To apply lints @@ -229,7 +306,7 @@ pub fn do_codemod( Some(module) => match analysis.module_file_id(loaded.project_id, module)? { Some(file_id) => { if args.is_format_normal() { - writeln!(cli, "module specified: {}", module)?; + writeln!(cli, "module specified: {module}")?; } (Some(file_id), analysis.module_name(file_id)?) } @@ -238,7 +315,7 @@ pub fn do_codemod( None => match &args.file { Some(file_name) => { if args.is_format_normal() { - writeln!(cli, "file specified: {}", file_name)?; + writeln!(cli, "file specified: {file_name}")?; } let path_buf = Utf8PathBuf::from_path_buf(fs::canonicalize(file_name).unwrap()) .expect("UTF8 conversion failed"); @@ -258,97 +335,144 @@ pub fn do_codemod( res = match (file_id, name) { (None, _) => { - do_parse_all(cli, &analysis, &loaded.project_id, diagnostics_config, args)? + let (results, err_in_diag, any_printed) = do_diagnostics_all( + cli, + &analysis, + &loaded.project_id, + diagnostics_config, + args, + loaded, + &args.module, + )?; + streamed_err_in_diag = err_in_diag; + any_diagnostics_printed = any_printed; + results } (Some(file_id), Some(name)) => { - if let Some(app) = &args.app { - if let Ok(Some(file_app)) = analysis.file_app_name(file_id) { - if file_app != AppName(app.to_string()) { - panic!("Module {} does not belong to app {}", name.as_str(), app) - } + if let Some(app) = &args.app + && let Ok(Some(file_app)) = analysis.file_app_name(file_id) + && file_app != AppName(app.to_string()) + { + panic!("Module {} does not belong to app {}", name.as_str(), app) + } + let result = + do_diagnostics_one(&analysis, diagnostics_config, file_id, &name, args)? 
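// NOTE: `do_diagnostics_all` above streams results: a spawned thread drives a
// rayon pipeline that sends each module's diagnostics through a crossbeam
// channel, the calling thread prints them as they arrive, and the final join
// guarantees the cloned analysis handle (and its read lock) is released. A
// self-contained sketch of the producer/consumer shape:
use crossbeam_channel::unbounded;
use rayon::prelude::*;
use std::thread;

fn stream_demo() {
    let (tx, rx) = unbounded();
    let worker = thread::spawn(move || {
        (0..100).into_par_iter().for_each_with(tx, |tx, n| {
            let _ = tx.send(n * n); // results stream out as they are produced
        });
    });
    for result in rx {
        println!("got {result}"); // consume while the producer is still running
    }
    worker.join().expect("diagnostics worker panicked");
}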
+ .map_or(vec![], |x| vec![x]); + + // Print diagnostics for the single file + let mut err_in_diag = false; + let mut module_count = 0; + if args.skip_stream_print() { + any_diagnostics_printed = false; + } else { + for r in &result { + let printed = print_diagnostic_result( + cli, + &analysis, + diagnostics_config, + args, + loaded, + &args.module, + &mut err_in_diag, + &mut module_count, + r, + )?; + any_diagnostics_printed = any_diagnostics_printed || printed; } } - do_parse_one(&analysis, diagnostics_config, file_id, &name, args)? - .map_or(vec![], |x| vec![x]) + + streamed_err_in_diag = err_in_diag; + result } (Some(file_id), _) => { - panic!("Could not get name from file_id for {:?}", file_id) + panic!("Could not get name from file_id for {file_id:?}") } }; - filter_diagnostics( - &analysis, - &args.module, - Some(&diagnostics_config.enabled), - &res, - &FxHashSet::default(), - )? + res }; - if initial_diags.is_empty() { - if args.is_format_normal() { - writeln!(cli, "No diagnostics reported")?; - } - } else { - initial_diags.sort_by(|(a, _, _), (b, _, _)| a.cmp(b)); - let mut err_in_diag = false; - if args.is_format_json() { - for (_name, file_id, diags) in &initial_diags { - if args.print_diags { - for diag in diags { - // We use JSON output for CI, and want to see warnings too. - // So do not filter on errors only - err_in_diag = true; - let vfs_path = loaded.vfs.file_path(*file_id); - let analysis = loaded.analysis(); - let root_path = &analysis - .project_data(*file_id) - .unwrap_or_else(|_err| panic!("could not find project data")) - .unwrap_or_else(|| panic!("could not find project data")) - .root_dir; - let relative_path = reporting::get_relative_path(root_path, vfs_path); - let prefix = args.prefix.as_ref(); - print_diagnostic_json( - diag, - &analysis, - *file_id, - with_prefix(relative_path, prefix).as_path(), - args.use_cli_severity, - cli, - )?; - } - } - } - } else { - writeln!( - cli, - "Diagnostics reported in {} modules:", - initial_diags.len() - )?; + let mut err_in_diag = streamed_err_in_diag; + // At this point, the analysis variable from above is dropped - for (name, file_id, diags) in &initial_diags { - writeln!(cli, " {}: {}", name, diags.len())?; - if args.print_diags { - for diag in diags { - if let diagnostics::Severity::Error = diag.severity { - err_in_diag = true; - }; - print_diagnostic( - diag, - &loaded.analysis(), - *file_id, - args.use_cli_severity, - cli, - )?; - } - } - } + // When streaming is disabled (--no-stream) and we're not applying fixes, + // we need to print diagnostics now since they weren't printed during streaming + if args.no_stream && !args.apply_fix && !initial_diags.is_empty() { + let analysis = loaded.analysis(); + let mut module_count = 0; + initial_diags.sort_by(|(a, _, _), (b, _, _)| a.cmp(b)); + for result in &initial_diags { + let printed = print_diagnostic_result( + cli, + &analysis, + diagnostics_config, + args, + loaded, + &args.module, + &mut err_in_diag, + &mut module_count, + result, + )?; + any_diagnostics_printed = any_diagnostics_printed || printed; } - if args.apply_fix && diagnostics_config.enabled.all_enabled() { + } + + // Handle apply_fix case separately since it needs to filter diagnostics anyway + if args.apply_fix { + if diagnostics_config.enabled.all_enabled() { bail!( "We cannot apply fixes if all diagnostics enabled. 
Perhaps provide --diagnostic-filter" ); } - if args.apply_fix && !diagnostics_config.enabled.all_enabled() { + + let mut filtered_diags = { + let analysis = loaded.analysis(); + filter_diagnostics( + &analysis, + &args.module, + Some(&diagnostics_config.enabled), + &initial_diags, + &FxHashSet::default(), + )? + }; + + if filtered_diags.is_empty() { + if args.is_format_normal() { + writeln!(cli, "No diagnostics reported")?; + } + } else { + if args.skip_stream_print() { + filtered_diags.sort_by(|(a, _, _), (b, _, _)| a.cmp(b)); + let module_count: &mut i32 = &mut 0; + let has_diagnostics: &mut bool = &mut false; + if args.is_format_json() { + do_print_diagnostics_json_filtered( + cli, + args, + loaded, + &mut err_in_diag, + module_count, + has_diagnostics, + &filtered_diags, + )?; + } else { + { + // Scope the analysis instance to ensure it's dropped before creating Lints + let analysis = loaded.analysis(); + do_print_diagnostics_filtered( + cli, + &analysis, + args, + loaded, + &mut err_in_diag, + module_count, + has_diagnostics, + &filtered_diags, + )?; + // Analysis is dropped here + } + } + } + let mut changed_files = FxHashSet::default(); let mut lints = Lints::new( &mut loaded.analysis_host, @@ -356,7 +480,7 @@ pub fn do_codemod( &mut loaded.vfs, args, &mut changed_files, - initial_diags, + filtered_diags, ); // We handle the fix application result here, so // the overall status of whether error-severity @@ -365,23 +489,235 @@ pub fn do_codemod( match lints.apply_relevant_fixes(args.is_format_normal(), cli) { Ok(_) => {} Err(err) => { - writeln!(cli, "Apply fix failed: {:#}", err).ok(); + writeln!(cli, "Apply fix failed: {err:#}").ok(); } }; + + if err_in_diag { + bail!("Errors found") + } } - if err_in_diag { + } else { + // Non-apply-fix case: rely on any_diagnostics_printed which is set + // correctly based on filtered diagnostics during streaming/batch printing + if !any_diagnostics_printed { + if args.is_format_normal() { + writeln!(cli, "No diagnostics reported")?; + } + } else if err_in_diag { bail!("Errors found") } } Ok(()) } +#[allow(clippy::too_many_arguments)] +fn print_diagnostic_result( + cli: &mut dyn Cli, + analysis: &Analysis, + config: &DiagnosticsConfig, + args: &Lint, + loaded: &LoadResult, + module: &Option, + err_in_diag: &mut bool, + module_count: &mut i32, + result: &(String, FileId, DiagnosticCollection), +) -> Result { + if args.is_format_json() { + do_print_diagnostic_collection_json( + cli, + analysis, + config, + args, + loaded, + module, + err_in_diag, + module_count, + result, + ) + } else { + do_print_diagnostic_collection( + cli, + analysis, + config, + args, + loaded, + module, + err_in_diag, + module_count, + result, + ) + } +} + +#[allow(clippy::too_many_arguments)] +fn do_print_diagnostic_collection( + cli: &mut dyn Cli, + analysis: &Analysis, + config: &DiagnosticsConfig, + args: &Lint, + loaded: &LoadResult, + module: &Option, + err_in_diag: &mut bool, + module_count: &mut i32, + result: &(String, FileId, DiagnosticCollection), +) -> Result { + let single_result = vec![result.clone()]; + let mut has_diagnostics = false; + if let Ok(filtered) = filter_diagnostics( + analysis, + module, + Some(&config.enabled), + &single_result, + &FxHashSet::default(), + ) { + do_print_diagnostics_filtered( + cli, + analysis, + args, + loaded, + err_in_diag, + module_count, + &mut has_diagnostics, + &filtered, + )?; + } + Ok(has_diagnostics) +} + +#[allow(clippy::too_many_arguments)] +fn do_print_diagnostics_filtered( + cli: &mut dyn Cli, + analysis: &Analysis, + 
args: &Lint, + loaded: &LoadResult, + err_in_diag: &mut bool, + module_count: &mut i32, + has_diagnostics: &mut bool, + filtered: &[(String, FileId, Vec)], +) -> Result<(), anyhow::Error> { + let _: () = for (name, file_id, diags) in filtered { + if !diags.is_empty() { + *has_diagnostics = true; + if *module_count == 0 { + writeln!(cli, "Diagnostics reported:")?; + } + *module_count += 1; + if !args.print_diags { + writeln!(cli, " {}: {}", name, diags.len())?; + } else { + for diag in diags { + if let diagnostics::Severity::Error = diag.severity { + *err_in_diag = true; + }; + // Get relative path for diagnostic output + let vfs_path = loaded.vfs.file_path(*file_id); + let root_path = &analysis + .project_data(*file_id) + .unwrap_or_else(|_err| panic!("could not find project data")) + .unwrap_or_else(|| panic!("could not find project data")) + .root_dir; + let relative_path = reporting::get_relative_path(root_path, vfs_path); + print_diagnostic( + diag, + analysis, + &loaded.vfs, + *file_id, + Some(relative_path), + args.use_cli_severity, + cli, + )?; + } + } + } + }; + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +fn do_print_diagnostic_collection_json( + cli: &mut dyn Cli, + analysis: &Analysis, + config: &DiagnosticsConfig, + args: &Lint, + loaded: &LoadResult, + module: &Option, + err_in_diag: &mut bool, + module_count: &mut i32, + result: &(String, FileId, DiagnosticCollection), +) -> Result { + let single_result = vec![result.clone()]; + let mut has_diagnostics = false; + if let Ok(filtered) = filter_diagnostics( + analysis, + module, + Some(&config.enabled), + &single_result, + &FxHashSet::default(), + ) { + do_print_diagnostics_json_filtered( + cli, + args, + loaded, + err_in_diag, + module_count, + &mut has_diagnostics, + &filtered, + )?; + } + Ok(has_diagnostics) +} + +fn do_print_diagnostics_json_filtered( + cli: &mut dyn Cli, + args: &Lint, + loaded: &LoadResult, + err_in_diag: &mut bool, + module_count: &mut i32, + has_diagnostics: &mut bool, + filtered: &[(String, FileId, Vec)], +) -> Result<(), anyhow::Error> { + let _: () = for (name, file_id, diags) in filtered { + if !diags.is_empty() { + *has_diagnostics = true; + *module_count += 1; + if !args.print_diags { + writeln!(cli, " {}: {}", name, diags.len())?; + } else { + for diag in diags { + *err_in_diag = true; + + // Get relative path for diagnostic output + let vfs_path = loaded.vfs.file_path(*file_id); + let analysis = loaded.analysis(); + let root_path = &analysis + .project_data(*file_id) + .unwrap_or_else(|_err| panic!("could not find project data")) + .unwrap_or_else(|| panic!("could not find project data")) + .root_dir; + let relative_path = reporting::get_relative_path(root_path, vfs_path); + print_diagnostic_json( + diag, + &analysis, + *file_id, + relative_path, + args.use_cli_severity, + cli, + )?; + } + } + } + }; + Ok(()) +} + fn get_diagnostics_config(args: &Lint) -> Result { let cfg_from_file = if args.read_config || args.config_file.is_some() { read_lint_config_file(&args.project, &args.config_file)? 
} else { LintConfig::default() }; + let cfg = DiagnosticsConfig::default() .configure_diagnostics( &cfg_from_file, @@ -400,12 +736,82 @@ fn get_diagnostics_config(args: &Lint) -> Result { fn print_diagnostic( diag: &diagnostics::Diagnostic, analysis: &Analysis, + vfs: &Vfs, file_id: FileId, + path: Option<&Path>, use_cli_severity: bool, cli: &mut dyn Cli, ) -> Result<(), anyhow::Error> { let line_index = analysis.line_index(file_id)?; - writeln!(cli, " {}", diag.print(&line_index, use_cli_severity))?; + let diag_str = diag.print(&line_index, use_cli_severity); + if let Some(path) = path { + writeln!(cli, "{}:{}", path.display(), diag_str)?; + } else { + writeln!(cli, " {}", diag_str)?; + } + + // Print any related information, indented + if let Some(related_info) = &diag.related_info { + for info in related_info { + let info_line_index = analysis.line_index(info.file_id)?; + let start = info_line_index.line_col(info.range.start()); + let end = info_line_index.line_col(info.range.end()); + + // Include file identifier if related info is from a different file + if info.file_id != file_id { + let file_identifier = + if let Ok(Some(module_name)) = analysis.module_name(info.file_id) { + // It's a module (.erl file), use module name + format!("[{}]", module_name.as_str()) + } else { + // Not a module (e.g., include file), use relative path + let vfs_path = vfs.file_path(info.file_id); + if let Ok(Some(project_data)) = analysis.project_data(info.file_id) { + let relative_path = + reporting::get_relative_path(&project_data.root_dir, vfs_path); + format!("[{}]", relative_path.display()) + } else { + // Fallback: just show location without file identifier + String::new() + } + }; + + if file_identifier.is_empty() { + writeln!( + cli, + " {}:{}-{}:{}: {}", + start.line + 1, + start.col_utf16 + 1, + end.line + 1, + end.col_utf16 + 1, + info.message + )?; + } else { + writeln!( + cli, + " {} {}:{}-{}:{}: {}", + file_identifier, + start.line + 1, + start.col_utf16 + 1, + end.line + 1, + end.col_utf16 + 1, + info.message + )?; + } + } else { + writeln!( + cli, + " {}:{}-{}:{}: {}", + start.line + 1, + start.col_utf16 + 1, + end.line + 1, + end.col_utf16 + 1, + info.message + )?; + } + } + } + Ok(()) } @@ -424,8 +830,7 @@ fn print_diagnostic_json( cli, "{}", serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| panic!( - "print_diagnostics_json failed for '{:?}': {}", - converted_diagnostic, err + "print_diagnostics_json failed for '{converted_diagnostic:?}': {err}" )) )?; Ok(()) @@ -591,13 +996,10 @@ impl<'a> Lints<'a> { if self.args.check_eqwalize_all { writeln!(cli, "Running eqwalize-all to check for knock-on problems.")?; } - let diags = do_parse_one( - &self.analysis_host.analysis(), - self.cfg, - file_id, - &name, - self.args, - )?; + let diags = { + let analysis = self.analysis_host.analysis(); + do_diagnostics_one(&analysis, self.cfg, file_id, &name, self.args)? 
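// NOTE: `print_diagnostic` above renders related information indented under
// the main diagnostic, converting the 0-based positions from `line_col()` to
// the 1-based form users expect, and prefixing a `[module]` or relative-path
// marker only when the info points into a different file. A sketch of the
// position rendering (function name illustrative):
fn render_span(start: (u32, u32), end: (u32, u32), message: &str) -> String {
    // line/col arrive 0-based; print them 1-based.
    format!(
        "      {}:{}-{}:{}: {}",
        start.0 + 1,
        start.1 + 1,
        end.0 + 1,
        end.1 + 1,
        message
    )
}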
+ }; let err_in_diags = diags.iter().any(|(_, file_id, diags)| { let diags = diags.diagnostics_for(*file_id); diags @@ -608,14 +1010,15 @@ impl<'a> Lints<'a> { bail!("Applying change introduces an error diagnostic"); } else { self.changed_files.insert((file_id, name.clone())); - let changes = changes - .iter() - .filter_map(|d| { - form_from_diff(&self.analysis_host.analysis(), file_id, d) - }) - .collect::>(); + let changed_forms = { + let analysis = self.analysis_host.analysis(); + changes + .iter() + .filter_map(|d| form_from_diff(&analysis, file_id, d)) + .collect::>() + }; - for form_id in &changes { + for form_id in &changed_forms { self.changed_forms.insert(InFile::new(file_id, *form_id)); } @@ -628,24 +1031,24 @@ impl<'a> Lints<'a> { .flatten() .collect::>(); - let new_diagnostics = filter_diagnostics( - &self.analysis_host.analysis(), - &None, - None, - &new_diags, - &self.changed_forms, - )?; + let new_diagnostics = { + let analysis = self.analysis_host.analysis(); + filter_diagnostics(&analysis, &None, None, &new_diags, &self.changed_forms)? + }; self.diags = diagnostics_by_file_id(&new_diagnostics); if !self.diags.is_empty() { writeln!(cli, "---------------------------------------------\n")?; writeln!(cli, "New filtered diagnostics")?; + let analysis = self.analysis_host.analysis(); for (file_id, (name, diags)) in &self.diags { writeln!(cli, " {}: {}", name, diags.len())?; for diag in diags.iter() { print_diagnostic( diag, - &self.analysis_host.analysis(), + &analysis, + self.vfs, *file_id, + None, self.args.use_cli_severity, cli, )?; @@ -717,10 +1120,13 @@ impl<'a> Lints<'a> { if format_normal { writeln!(cli, "---------------------------------------------\n")?; writeln!(cli, "Applying fix in module '{name}' for")?; + let analysis = self.analysis_host.analysis(); print_diagnostic( diagnostic, - &self.analysis_host.analysis(), + &analysis, + self.vfs, file_id, + None, self.args.use_cli_severity, cli, )?; @@ -781,12 +1187,15 @@ impl<'a> Lints<'a> { fixes.iter().cloned().unzip(); if format_normal { writeln!(cli, "---------------------------------------------\n")?; - writeln!(cli, "Applying fix(es) in module '{name}' for")?; + let plural = if diagnostics.len() > 1 { "es" } else { "" }; + writeln!(cli, "Applying fix{plural} in module '{name}' for")?; for diagnostic in diagnostics { print_diagnostic( &diagnostic, &self.analysis_host.analysis(), + self.vfs, file_id, + None, self.args.use_cli_severity, cli, )?; @@ -864,7 +1273,7 @@ impl<'a> Lints<'a> { let mut output = File::create(to_path).ok()?; write!(output, "{file_text}").ok()?; } else if let Some(to) = &self.args.to { - let to_path = to.join(format!("{}.erl", name)); + let to_path = to.join(format!("{name}.erl")); let mut output = File::create(to_path).ok()?; write!(output, "{file_text}").ok()?; } else { @@ -921,13 +1330,6 @@ fn get_form_id_at_offset( Some(form_id) } -fn with_prefix(path: &Path, prefix: Option<&String>) -> PathBuf { - match prefix { - Some(prefix) => Path::new(prefix).join(path), - None => path.into(), - } -} - #[cfg(test)] mod tests { use std::ffi::OsString; @@ -936,6 +1338,7 @@ mod tests { use elp::cli::Fake; use elp_ide::FunctionMatch; use elp_ide::diagnostics::DiagnosticCode; + use elp_ide::diagnostics::ErlangServiceConfig; use elp_ide::diagnostics::Lint; use elp_ide::diagnostics::LintsFromConfig; use elp_ide::diagnostics::ReplaceCall; @@ -943,6 +1346,7 @@ mod tests { use elp_ide::diagnostics::Replacement; use expect_test::Expect; use expect_test::expect; + use fxhash::FxHashMap; use super::LintConfig; use 
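// NOTE: the blocks above deliberately scope each `analysis_host.analysis()`
// snapshot so it is dropped before the host is touched again; an outstanding
// read handle would otherwise block later mutation. The same discipline with
// a plain RwLock, as a minimal sketch:
use std::sync::RwLock;

fn scoped_read_demo(lock: &RwLock<Vec<i32>>) {
    let len = {
        let guard = lock.read().unwrap();
        guard.len() // read guard dropped at the end of this block
    };
    // Safe to take the write lock now: no read guard is held.
    lock.write().unwrap().push(len as i32);
}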
super::do_codemod; @@ -951,7 +1355,7 @@ mod tests { use crate::args::Command; macro_rules! args_vec { - ($($e:expr_2021$(,)?)+) => { + ($($e:expr$(,)?)+) => { vec![$(OsString::from($e),)+] } } @@ -967,12 +1371,19 @@ mod tests { }, enabled_lints: vec![DiagnosticCode::HeadMismatch], disabled_lints: vec![], + linters: FxHashMap::default(), + erlang_service: ErlangServiceConfig { + warnings_as_errors: true, + }, }) .unwrap(); expect![[r#" enabled_lints = ["P1700"] disabled_lints = [] + + [erlang_service] + warnings_as_errors = true [[ad_hoc_lints.lints]] type = "ReplaceCall" @@ -984,6 +1395,8 @@ mod tests { [ad_hoc_lints.lints.action] action = "Replace" type = "UseOk" + + [linters] "#]] .assert_eq(&result); } @@ -1004,9 +1417,13 @@ mod tests { TrivialMatch, ], disabled_lints: [], + erlang_service: ErlangServiceConfig { + warnings_as_errors: false, + }, ad_hoc_lints: LintsFromConfig { lints: [], }, + linters: {}, } "#]] .assert_debug_eq(&lint_config); @@ -1052,11 +1469,11 @@ mod tests { head_mismatcX(0) -> 0. "#, expect![[r#" - module specified: lints - Diagnostics reported in 1 modules: - lints: 1 - 4:2-4:15::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch' - "#]], + module specified: lints + Diagnostics reported: + app_a/src/lints.erl:5:3-5:16::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch' + 4:3-4:16: Mismatched clause name + "#]], expect![""], ); } @@ -1073,9 +1490,8 @@ mod tests { "#, expect![[r#" module specified: lints - Diagnostics reported in 1 modules: - lints: 1 - 2:2-2:5::[Warning] [L1230] function foo/0 is unused + Diagnostics reported: + app_a/src/lints.erl:3:3-3:6::[Warning] [L1230] function foo/0 is unused "#]], expect![""], ); diff --git a/crates/elp/src/bin/main.rs b/crates/elp/src/bin/main.rs index 3a5c4daff0..56535e4492 100644 --- a/crates/elp/src/bin/main.rs +++ b/crates/elp/src/bin/main.rs @@ -40,8 +40,10 @@ mod erlang_service_cli; mod explain_cli; mod glean; mod lint_cli; +// @fb-only: mod meta_only; mod reporting; mod shell; +mod ssr_cli; // Use jemalloc as the global allocator #[cfg(not(any(target_env = "msvc", target_os = "openbsd")))] @@ -63,16 +65,21 @@ const THREAD_STACK_SIZE: usize = 10_000_000; fn main() { let _timer = timeit!("main"); - let mut cli = cli::Real::default(); let args = args::args().run(); - let res = try_main(&mut cli, args); + let use_color = args.should_use_color(); + let mut cli: Box = if use_color { + Box::new(cli::Real::default()) + } else { + Box::new(cli::NoColor::default()) + }; + let res = try_main(&mut *cli, args); let code = handle_res(res, cli.err()); process::exit(code); } fn handle_res(result: Result<()>, stderr: &mut dyn Write) -> i32 { if let Err(err) = result { - writeln!(stderr, "{:#}", err).unwrap(); + writeln!(stderr, "{err:#}").unwrap(); 101 } else { 0 @@ -81,7 +88,7 @@ fn handle_res(result: Result<()>, stderr: &mut dyn Write) -> i32 { fn setup_static(args: &Args) { if let Err(err) = eqwalizer_support::setup_eqwalizer_support(&EQWALIZER_SUPPORT_DIR) { - log::warn!("Failed to setup eqwalizer_support: {}", err); + log::warn!("Failed to setup eqwalizer_support: {err}"); } if let Some(erl) = &args.erl { let path = fs::canonicalize(erl).expect("erl path should be valid"); @@ -96,14 +103,28 @@ fn setup_static(args: &Args) { } } +fn setup_cli_telemetry(args: &Args) { + match &args.command { + args::Command::RunServer(_) => { + // Do nothing, we have server telemetry + } + _ => { + // Initialize CLI telemetry, if used + // @fb-only: meta_only::initialize_telemetry(); + } + } +} + fn try_main(cli: &mut 
dyn Cli, args: Args) -> Result<()> { let logger = setup_logging(&args.log_file, args.no_log_buffering)?; + setup_cli_telemetry(&args); INIT.call_once(|| { setup_static(&args); setup_thread_pool(); }); let query_config = args.query_config(); + let use_color = args.should_use_color(); match args.command { args::Command::RunServer(_) => run_server(logger)?, args::Command::ParseAll(args) => erlang_service_cli::parse_all(&args, cli, &query_config)?, @@ -121,15 +142,18 @@ fn try_main(cli: &mut dyn Cli, args: Args) -> Result<()> { args::Command::BuildInfo(args) => build_info_cli::save_build_info(args, &query_config)?, args::Command::ProjectInfo(args) => build_info_cli::save_project_info(args, &query_config)?, args::Command::Lint(args) => lint_cli::run_lint_command(&args, cli, &query_config)?, + args::Command::Ssr(ssr_args) => { + ssr_cli::run_ssr_command(&ssr_args, cli, &query_config, use_color)? + } args::Command::GenerateCompletions(args) => { let instructions = args::gen_completions(&args.shell); - writeln!(cli, "#Please run this:\n{}", instructions)? + writeln!(cli, "#Please run this:\n{instructions}")? } args::Command::Version(_) => writeln!(cli, "elp {}", elp::version())?, args::Command::Shell(args) => shell::run_shell(&args, cli, &query_config)?, args::Command::Help() => { let help = batteries::get_usage(args::args()); - writeln!(cli, "{}", help)? + writeln!(cli, "{help}")? } args::Command::Explain(args) => explain_cli::explain(&args, cli)?, args::Command::Glean(args) => glean::index(&args, cli, &query_config)?, @@ -169,7 +193,7 @@ fn setup_thread_pool() { .stack_size(THREAD_STACK_SIZE) .build_global() { - log::warn!("Failed to setup thread pool: {}", err); + log::warn!("Failed to setup thread pool: {err}"); } } @@ -237,7 +261,7 @@ mod tests { const BUCK_QUERY_CONFIG: BuckQueryConfig = BuckQueryConfig::BuildGeneratedCode; macro_rules! args_vec { - ($($e:expr_2021$(,)?)+) => { + ($($e:expr$(,)?)+) => { vec![$(OsString::from($e),)+] } } @@ -264,7 +288,7 @@ mod tests { let (_stdout, stderr, code) = elp(args_vec![ "parse-all", "--project", - "../../test_projects/standard", + "../../test/test_projects/standard", "--to", tmp.path(), ]); @@ -282,7 +306,7 @@ mod tests { fn parse_all_complete(project: &str) -> Result { // Just check the command returns. 
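// NOTE: `main` above now picks colour support once and hands the rest of the
// program a trait object (`Box<dyn Cli>`), so downstream code is agnostic
// about which writer it holds. A sketch of that selection shape (the `Out`
// trait and impls are stand-ins for elp's `cli::Real` / `cli::NoColor`):
trait Out {
    fn line(&mut self, s: &str);
}
struct Color;
struct Plain;
impl Out for Color {
    fn line(&mut self, s: &str) {
        println!("\x1b[32m{s}\x1b[0m"); // wrap in ANSI colour codes
    }
}
impl Out for Plain {
    fn line(&mut self, s: &str) {
        println!("{s}");
    }
}

fn pick_writer(use_color: bool) -> Box<dyn Out> {
    // Both arms coerce to the same trait-object type.
    if use_color { Box::new(Color) } else { Box::new(Plain) }
}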
- let project_path = format!("../../test_projects/{}", project); + let project_path = format!("../../test/test_projects/{project}"); let tmp = Builder::new().prefix("elp_parse_all_").tempdir().unwrap(); let (_stdout, _stderr, code) = elp(args_vec![ "parse-all", @@ -348,20 +372,20 @@ mod tests { Mode::Cli, &BUCK_QUERY_CONFIG, ) - .with_context(|| format!("Failed to load project at {}", str_path)) + .with_context(|| format!("Failed to load project at {str_path}")) .unwrap(); loaded .analysis_host .raw_database_mut() .set_eqwalizer_config(Arc::new(config)); build::compile_deps(&loaded, &cli) - .with_context(|| format!("Failed to compile deps for project {}", project)) + .with_context(|| format!("Failed to compile deps for project {project}")) .unwrap(); let analysis = loaded.analysis(); let module_index = analysis .module_index(loaded.project_id) - .with_context(|| format!("No module index for project {}", project)) + .with_context(|| format!("No module index for project {project}")) .unwrap(); let file_ids: Vec = module_index .iter_own() @@ -419,26 +443,41 @@ mod tests { }) .unwrap(); - let otp_version = OTP_VERSION.as_ref().expect("MISSING OTP VERSION"); let exp_path = expect_file!(format!( - "../resources/test/{}/{}/{}-OTP-{}.pretty", + "../resources/test/{}/{}/{}.pretty", project, app, module.as_str(), - otp_version, )); let (stdout, _) = cli.to_strings(); - assert_normalised_file(exp_path, &stdout, project_path.into(), false); + + let otp_version = OTP_VERSION.as_ref().expect("MISSING OTP VERSION"); + let otp_version_regex = + regex::bytes::Regex::new(&format!("{}OTP([0-9]+)Only", "@")).unwrap(); + let contents = analysis.file_text(file_id).unwrap(); + let otp_version_capture = otp_version_regex + .captures(&contents.as_bytes()[0..(2001.min(contents.len()))]); + if let Some((_, [otp_version_only])) = + otp_version_capture.map(|cap| cap.extract()) + { + if otp_version_only == otp_version.as_bytes() { + assert_normalised_file( + exp_path, + &stdout, + project_path.into(), + false, + ); + } + } else { + assert_normalised_file(exp_path, &stdout, project_path.into(), false); + } } } EqwalizerDiagnostics::NoAst { module } => { - panic!( - "Could not run tests because module {} was not found", - module - ) + panic!("Could not run tests because module {module} was not found") } EqwalizerDiagnostics::Error(error) => { - panic!("Could not run tests: {}", error) + panic!("Could not run tests: {error}") } } } @@ -566,10 +605,7 @@ mod tests { fn eqwalize_target_diagnostics_match_snapshot_pretty() { if cfg!(feature = "buck") { simple_snapshot( - args_vec![ - "eqwalize-target", - "//whatsapp/elp/test_projects/standard:app_a", - ], + args_vec!["eqwalize-target", "//standard:app_a",], "standard", expect_file!("../resources/test/standard/eqwalize_target_diagnostics.pretty"), true, @@ -633,6 +669,24 @@ mod tests { ); } + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn parse_all_diagnostics_severity(buck: bool) { + simple_snapshot_expect_error( + args_vec![ + "parse-elp", + "--module", + "diagnostics", + "--severity", + "error" + ], + "diagnostics", + expect_file!("../resources/test/diagnostics/parse_all_diagnostics_error.stdout"), + buck, + None, + ); + } + #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn parse_elp_file_attribute(buck: bool) { @@ -818,6 +872,25 @@ mod tests { ); } + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn parse_elp_report_system_stats(buck: bool) { + simple_snapshot_output_contains( + args_vec!["parse-elp", "--report-system-stats", 
"--module", "app_a"], + "standard", + &[ + "Memory usage:", + "allocated:", + "active:", + "resident:", + "FileTextQuery", + ], + buck, + None, + 0, + ); + } + #[test] fn build_info_json_not_buck() { let tmp_dir = make_tmp_dir(); @@ -839,8 +912,7 @@ mod tests { ); assert!( stderr.is_empty(), - "expected stderr to be empty, got:\n{}", - stderr + "expected stderr to be empty, got:\n{stderr}" ); assert!(tmp_file.clone().exists()); let content = fs::read_to_string(tmp_file).unwrap(); @@ -893,13 +965,14 @@ mod tests { ); assert!( stderr.is_empty(), - "expected stderr to be empty, got:\n{}", - stderr + "expected stderr to be empty, got:\n{stderr}" ); assert!(tmp_file.clone().exists()); let content = fs::read_to_string(tmp_file).unwrap(); let mut buck_config = BuckConfig::default(); - buck_config.buck_root = Some(AbsPathBuf::assert_utf8(current_dir().unwrap())); + buck_config.buck_root = Some(AbsPathBuf::assert_utf8( + current_dir().unwrap().join(path_str.clone()), + )); let prelude_cell = get_prelude_cell(&buck_config).expect("could not get prelude"); let prelude_cell = prelude_cell.strip_prefix("/").unwrap(); let content = content.replace(prelude_cell, "/[prelude]/"); @@ -911,38 +984,13 @@ mod tests { Some(AbsPathBuf::assert(Utf8PathBuf::from_path_buf(abs).unwrap())); let content = normalise_prelude_path(content, buck_config); + let content = sort_json(&content); + expect![[r#" { "apps": [ { - "name": "test_exec", - "dir": "/[prelude]//erlang/common_test/test_exec/src", - "src_dirs": [ - "" - ], - "extra_src_dirs": [], - "include_dirs": [], - "macros": {} - }, - { - "name": "diagnostics_app_a", - "dir": "app_a", - "src_dirs": [ - "src" - ], - "extra_src_dirs": [], - "include_dirs": [ - "include" - ], - "macros": { - "COMMON_TEST": "true", - "TEST": "true" - } - }, - { - "name": "app_a_SUITE", "dir": "app_a/test", - "src_dirs": [], "extra_src_dirs": [ "" ], @@ -950,61 +998,88 @@ mod tests { "macros": { "COMMON_TEST": "true", "TEST": "true" - } + }, + "name": "app_a_SUITE", + "src_dirs": [] }, { - "name": "common", - "dir": "/[prelude]//erlang/common_test/common", + "dir": "/[prelude]//erlang/common_test/test_exec/src", + "extra_src_dirs": [], + "include_dirs": [], + "macros": {}, + "name": "test_exec", "src_dirs": [ - "src" - ], + "" + ] + }, + { + "dir": "/[prelude]//erlang/common_test/common", "extra_src_dirs": [], "include_dirs": [ "include" ], - "macros": {} + "macros": {}, + "name": "common", + "src_dirs": [ + "src" + ] }, { - "name": "cth_hooks", "dir": "/[prelude]//erlang/common_test/cth_hooks/src", - "src_dirs": [ - "" - ], "extra_src_dirs": [], "include_dirs": [ "" ], - "macros": {} + "macros": {}, + "name": "cth_hooks", + "src_dirs": [ + "" + ] }, { - "name": "buck2_shell_utils", "dir": "/[prelude]//erlang/shell/src", - "src_dirs": [ - "" - ], "extra_src_dirs": [], "include_dirs": [], - "macros": {} + "macros": {}, + "name": "buck2_shell_utils", + "src_dirs": [ + "" + ] + }, + { + "dir": "app_a", + "extra_src_dirs": [], + "include_dirs": [ + "include" + ], + "macros": { + "COMMON_TEST": "true", + "TEST": "true" + }, + "name": "diagnostics_app_a", + "src_dirs": [ + "src" + ] }, { - "name": "test_binary", "dir": "/[prelude]//erlang/common_test/test_binary/src", - "src_dirs": [ - "" - ], "extra_src_dirs": [], "include_dirs": [], - "macros": {} + "macros": {}, + "name": "test_binary", + "src_dirs": [ + "" + ] }, { - "name": "test_cli_lib", "dir": "/[prelude]//erlang/common_test/test_cli_lib/src", - "src_dirs": [ - "" - ], "extra_src_dirs": [], "include_dirs": [], - "macros": {} + "macros": 
{}, + "name": "test_cli_lib", + "src_dirs": [ + "" + ] } ], "deps": [] @@ -1019,6 +1094,12 @@ mod tests { content.replace(prelude_cell, "/[prelude]/") } + fn sort_json(content: &str) -> String { + let mut json: serde_json::Value = serde_json::from_str(content).unwrap(); + json.sort_all_objects(); + serde_json::to_string_pretty(&json).unwrap() + } + #[test] #[ignore] fn build_info_json_buck_bxl_generated() { @@ -1032,7 +1113,7 @@ mod tests { "--to", tmp_file.clone(), "--project", - path_str + path_str.clone() ]; let (stdout, stderr, code) = elp(args); assert_eq!( @@ -1042,13 +1123,14 @@ mod tests { ); assert!( stderr.is_empty(), - "expected stderr to be empty, got:\n{}", - stderr + "expected stderr to be empty, got:\n{stderr}" ); assert!(tmp_file.clone().exists()); let content = fs::read_to_string(tmp_file).unwrap(); let mut buck_config = BuckConfig::default(); - buck_config.buck_root = Some(AbsPathBuf::assert_utf8(current_dir().unwrap())); + buck_config.buck_root = Some(AbsPathBuf::assert_utf8( + current_dir().unwrap().join(path_str.clone()), + )); let prelude_cell = get_prelude_cell(&buck_config).expect("could not get prelude"); let prelude_cell = prelude_cell.strip_prefix("/").unwrap(); let content = content.replace(prelude_cell, "/[prelude]/"); @@ -1207,6 +1289,24 @@ mod tests { ); } + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn lint_application(buck: bool) { + simple_snapshot_expect_error( + args_vec![ + "lint", + "--application", + "app_a", + "--diagnostic-filter", + "P1700", + ], + "linter", + expect_file!("../resources/test/linter/parse_elp_lint_app.stdout"), + buck, + None, + ); + } + #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn lint_report_suppressed(buck: bool) { @@ -1266,6 +1366,7 @@ mod tests { check_lint_fix( args_vec![ "lint", + "--no-stream", "--diagnostic-filter", "W0010", "--experimental", @@ -1293,6 +1394,7 @@ mod tests { check_lint_fix( args_vec![ "lint", + "--no-stream", "--diagnostic-filter", "W0010", "--experimental", @@ -1318,7 +1420,7 @@ mod tests { fn lint_config_file_used(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr_sorted( args_vec![ "lint", "--diagnostic-filter", @@ -1335,6 +1437,9 @@ mod tests { Path::new("../resources/test/lint/lint_recursive"), &[], false, + Some(expect![[r#" + Errors found + "#]]), ) .expect("bad test"); } @@ -1349,7 +1454,7 @@ mod tests { "lint", "--experimental", "--config-file", - "../../test_projects/linter/does_not_exist.toml" + "../../test/test_projects/linter/does_not_exist.toml" ], "linter", expect_file!("../resources/test/linter/parse_elp_lint_custom_config_invalid_output.stdout"), @@ -1361,7 +1466,7 @@ mod tests { &[], false, Some(expect![[r#" - unable to read "../../test_projects/linter/does_not_exist.toml": No such file or directory (os error 2) + unable to read "../../test/test_projects/linter/does_not_exist.toml": No such file or directory (os error 2) "#]]), ) .expect("bad test"); @@ -1372,12 +1477,12 @@ mod tests { fn lint_custom_config_file_used(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr_sorted( args_vec![ "lint", "--experimental", "--config-file", - "../../test_projects/linter/elp_lint_test1.toml" + "../../test/test_projects/linter/elp_lint_test1.toml" ], "linter", expect_file!("../resources/test/linter/parse_elp_lint_custom_config_output.stdout"), @@ -1388,6 +1493,7 @@ mod tests { Path::new("../resources/test/lint/lint_recursive"), &[], false, 
+ None, ) .expect("bad test"); } @@ -1402,7 +1508,7 @@ mod tests { "lint", "--experimental", "--config-file", - "../../test_projects/linter/elp_lint_adhoc.toml", + "../../test/test_projects/linter/elp_lint_adhoc.toml", "--module", "app_b", "--apply-fix", @@ -1426,14 +1532,14 @@ mod tests { #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn lint_diagnostic_ignore(buck: bool) { - simple_snapshot( + simple_snapshot_sorted( args_vec![ "lint", "--experimental", "--diagnostic-ignore", "W0011", "--config-file", - "../../test_projects/linter/elp_lint_test_ignore.toml" + "../../test/test_projects/linter/elp_lint_test_ignore.toml" ], "linter", expect_file!("../resources/test/linter/parse_elp_lint_ignore.stdout"), @@ -1477,7 +1583,7 @@ mod tests { &[], false, Some(expect![[r#" - failed to read "../../test_projects/linter_bad_config/.elp_lint.toml":expected a right bracket, found an identifier at line 6 column 4 + failed to read "../../test/test_projects/linter_bad_config/.elp_lint.toml":expected a right bracket, found an identifier at line 6 column 4 "#]]), ) .expect("bad test"); @@ -1486,8 +1592,8 @@ mod tests { #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn lint_no_diagnostics_filter_all_enabled(buck: bool) { - simple_snapshot_expect_error( - args_vec!["lint",], + simple_snapshot_expect_error_sorted( + args_vec!["lint"], "linter", expect_file!("../resources/test/linter/parse_elp_no_lint_specified_output.stdout"), buck, @@ -1495,10 +1601,24 @@ mod tests { ); } + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn lint_no_stream_produces_output(buck: bool) { + if otp::supports_eep66_sigils() { + simple_snapshot_expect_error( + args_vec!["lint", "--no-stream"], + "diagnostics", + expect_file!("../resources/test/diagnostics/lint_no_stream.stdout"), + buck, + None, + ); + } + } + #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn lint_no_diagnostics_filter_all_enabled_json(buck: bool) { - simple_snapshot_expect_error( + simple_snapshot_expect_error_sorted( args_vec!["lint", "--format", "json"], "linter", expect_file!("../resources/test/linter/parse_elp_no_lint_specified_json_output.stdout"), @@ -1525,11 +1645,11 @@ mod tests { fn lint_explicit_enable_diagnostic(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr_sorted( args_vec![ "lint", "--config-file", - "../../test_projects/linter/elp_lint_test2.toml" + "../../test/test_projects/linter/elp_lint_test2.toml" ], "linter", expect_file!("../resources/test/linter/parse_elp_lint_explicit_enable_output.stdout"), @@ -1540,6 +1660,9 @@ mod tests { Path::new("../resources/test/lint/lint_recursive"), &[], false, + Some(expect![[r#" + Errors found + "#]]), ) .expect("bad test"); } @@ -1549,7 +1672,7 @@ mod tests { fn lint_json_output(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr_sorted( args_vec![ "lint", "--diagnostic-filter", @@ -1567,35 +1690,9 @@ mod tests { Path::new("../resources/test/lint/lint_recursive"), &[], false, - ) - .expect("bad test"); - } - - #[test_case(false ; "rebar")] - #[test_case(true ; "buck")] - fn lint_json_output_prefix(buck: bool) { - let tmp_dir = make_tmp_dir(); - let tmp_path = tmp_dir.path(); - check_lint_fix( - args_vec![ - "lint", - "--diagnostic-filter", - "W0010", - "--experimental", - "--format", - "json", - "--prefix", - "my/prefix" - ], - "linter", - expect_file!("../resources/test/linter/parse_elp_lint_json_output_prefix.stdout"), - 101, - 
buck, - None, - tmp_path, - Path::new("../resources/test/lint/lint_recursive"), - &[], - false, + Some(expect![[r#" + Errors found + "#]]), ) .expect("bad test"); } @@ -1605,7 +1702,7 @@ fn lint_applies_fix_using_to_dir(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr( args_vec![ "lint", "--module", @@ -1614,7 +1711,7 @@ "P1700", "--to", tmp_path, - "--apply-fix" + "--apply-fix", ], "diagnostics", expect_file!("../resources/test/diagnostics/parse_elp_lint_fix.stdout"), @@ -1625,6 +1722,9 @@ Path::new("../resources/test/lint/head_mismatch"), &[("app_a/src/lints.erl", "lints.erl")], false, + Some(expect![[r#" + Errors found + "#]]), ) .expect("Bad test"); } @@ -1634,7 +1734,7 @@ fn lint_applies_fix_using_to_dir_json_output(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr( args_vec![ "lint", "--module", @@ -1656,6 +1756,9 @@ Path::new("../resources/test/lint/head_mismatch"), &[("app_a/src/lints.erl", "lints.erl")], false, + Some(expect![[r#" + Errors found + "#]]), ) .expect("Bad test"); } @@ -1676,7 +1779,7 @@ fn do_lint_applies_fix_in_place(buck: bool) { let project = "in_place_tests"; - check_lint_fix( + check_lint_fix_stderr( args_vec![ "lint", "--module", @@ -1695,6 +1798,9 @@ Path::new("../resources/test/lint/head_mismatch"), &[("app_a/src/lints.erl", "app_a/src/lints.erl")], true, + Some(expect![[r#" + Errors found + "#]]), ) .expect("Bad test"); } @@ -1734,7 +1840,7 @@ fn lint_applies_code_action_fixme_if_requested(buck: bool) { let tmp_dir = make_tmp_dir(); let tmp_path = tmp_dir.path(); - check_lint_fix( + check_lint_fix_stderr( args_vec![ "lint", "--module", @@ -1755,6 +1861,9 @@ Path::new("../resources/test/lint/ignore_app_env"), &[("app_a/src/spelling.erl", "spelling.erl")], false, + Some(expect![[r#" + Errors found + "#]]), ) .expect("Bad test"); } @@ -1775,7 +1884,7 @@ #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn lint_edoc(buck: bool) { - simple_snapshot( + simple_snapshot_sorted( args_vec![ "lint", "--include-edoc-diagnostics", @@ -1809,7 +1918,7 @@ #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn lint_ct_include_tests(buck: bool) { - simple_snapshot_expect_error( + simple_snapshot_expect_error_sorted( args_vec![ "lint", "--include-ct-diagnostics", @@ -1827,8 +1936,8 @@ #[test] fn lint_resolves_generated_includes() { if cfg!(feature = "buck") { - simple_snapshot_expect_error( - args_vec!["lint"], + simple_snapshot_expect_error_sorted( + args_vec!["lint", "--module", "top_includer",], "buck_tests_2", expect_file!("../resources/test/buck_tests_2/resolves_generated_includes.stdout"), true, @@ -1843,7 +1952,8 @@ simple_snapshot_expect_stderror( args_vec!["lint",], "buck_bad_config", - expect_file!("../resources/test/buck_bad_config/bxl_error_message.stdout"), + // @fb-only: expect_file!("../resources/test/buck_bad_config/bxl_error_message.stdout"), + expect_file!("../resources/test/buck_bad_config/bxl_error_message_oss.stdout"), // @oss-only true, None, true, @@ -1851,6 +1961,320 @@ } } + #[test] + fn lint_warnings_as_errors() { + simple_snapshot_expect_error_sorted( + args_vec![ + "lint", + "--no-stream", + "--config-file", + "../../test/test_projects/linter/elp_lint_warnings_as_errors.toml" + ], + "linter", +
expect_file!("../resources/test/linter/warnings_as_errors.stdout"), + true, + None, + ) + } + + #[test] + fn lint_custom_function_matches() { + simple_snapshot( + args_vec![ + "lint", + "--config-file", + "../../test/test_projects/linter/elp_lint_custom_function_matches.toml", + "--module", + "custom_function_matches" + ], + "linter", + expect_file!("../resources/test/linter/custom_function_matches.stdout"), + true, + None, + ) + } + + #[test] + fn lint_unavailable_type() { + simple_snapshot( + args_vec![ + "lint", + "--config-file", + "../../test/test_projects/xref/elp_lint_unavailable_type.toml", + "--module", + "unavailable_type" + ], + "xref", + expect_file!("../resources/test/xref/unavailable_type.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_from_config() { + simple_snapshot_sorted( + args_vec![ + "lint", + "--config-file", + "../../test/test_projects/linter/elp_lint_ssr_adhoc.toml", + ], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_from_bad_config() { + simple_snapshot_expect_stderror( + args_vec![ + "lint", + "--config-file", + "../../test/test_projects/linter/elp_lint_ssr_adhoc_parse_fail.toml", + ], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_parse_fail.stdout"), + true, + None, + false, + ) + } + + #[test] + fn lint_ssr_as_cli_arg() { + simple_snapshot( + args_vec!["ssr", "ssr: {_@A, _@B}.",], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_arg_without_prefix() { + simple_snapshot( + args_vec!["ssr", "{_@A, _@B}",], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_with_context_and_separator() { + simple_snapshot( + args_vec![ + "--colour", + "never", + "ssr", + "--context", + "2", + "--group-separator", + "====", + "{_@A, _@B}", + ], + "linter", + expect_file!("../resources/test/linter/ssr_context_separator.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_with_context_and_separator_color() { + simple_snapshot( + args_vec![ + "--colour", + "always", + "ssr", + "--context", + "2", + "--group-separator", + "====", + "{_@A, _@B}", + ], + "linter", + expect_file!("../resources/test/linter/ssr_context_separator_color.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_arg_multiple_patterns() { + simple_snapshot( + args_vec!["ssr", "3" "{4}",], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_multiple.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_arg_malformed() { + simple_snapshot_expect_stderror( + args_vec!["ssr", "ssr: {_@A, = _@B}.",], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_parse_error.stdout"), + true, + None, + false, + ) + } + + #[test] + fn lint_ssr_as_cli_parens_visible() { + simple_snapshot( + args_vec!["ssr", "--parens", "(_@A)",], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_parens_visible.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_parens_invisible() { + // Invisible parens are the default + simple_snapshot( + args_vec!["ssr", "(((3)))",], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_parens_invisible.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_macros_expand() { + simple_snapshot( + args_vec!["ssr", "--macros", "expand", "?BAR(_@AA)", "{4}"], + "linter", + 
expect_file!("../resources/test/linter/ssr_ad_hoc_cli_macros_expand.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_macros_expand_is_default() { + simple_snapshot( + args_vec!["ssr", "?BAR(_@AA)", "{4}"], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_macros_expand.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_macros_visible_expand() { + simple_snapshot( + args_vec!["ssr", "--macros", "visible-expand", "?BAR(_@AA)", "{4}"], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_macros_visible_expand.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_macros_no_expand() { + simple_snapshot( + args_vec!["ssr", "--macros", "no-expand", "?BAR(_@AA)", "{4}"], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_macros_no_expand.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_dump_config() { + simple_snapshot( + args_vec!["ssr", "--dump-config", "?BAR(_@AA)", "{4}"], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_dump_config.stdout"), + true, + None, + ) + } + + #[test] + fn lint_ssr_as_cli_dump_config_without_info() { + simple_snapshot( + args_vec!["ssr", "--dump-config", "?BAR(_@AA)", "{4}"], + "linter", + expect_file!("../resources/test/linter/ssr_ad_hoc_cli_dump_config.stdout"), + true, + None, + ) + } + + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn ssr_exclude_generated_by_default(buck: bool) { + simple_snapshot( + args_vec!["ssr", "--module", "erlang_diagnostics_errors_gen", "ok"], + "diagnostics", + expect_file!("../resources/test/diagnostics/ssr_exclude_generated.stdout"), + buck, + None, + ); + } + + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn ssr_include_generated_when_requested(buck: bool) { + simple_snapshot( + args_vec![ + "ssr", + "--module", + "erlang_diagnostics_errors_gen", + "--include-generated", + "ok" + ], + "diagnostics", + expect_file!("../resources/test/diagnostics/ssr_include_generated.stdout"), + buck, + None, + ); + } + + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + // We cannot use `should_panic` for this test, since the OSS CI runs with the `buck` feature disabled. + // When this happens the test is translated into a no-op, which does not panic. + // TODO(T248259687): Switch to should_panic once Buck2 is available on GitHub. + // Or remove the ignore once hierarchical support is implemented. 
+ #[ignore] // Support for hierarchical config is not implemented yet + fn lint_hierarchical_config_basic(buck: bool) { + simple_snapshot_sorted( + args_vec!["lint", "--read-config"], + "hierarchical_config", + expect_file!("../resources/test/hierarchical_config/basic.stdout"), + buck, + None, + ); + } + + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn lint_linter_config_basic(buck: bool) { + simple_snapshot_sorted( + args_vec!["lint", "--read-config", "--no-stream"], + "linter_config", + expect_file!("../resources/test/linter_config/basic.stdout"), + buck, + None, + ); + } + #[test_case(false ; "rebar")] #[test_case(true ; "buck")] fn eqwalizer_tests_check(buck: bool) { @@ -1952,6 +2376,147 @@ mod tests { } } + #[test] + fn eqwalize_specific_module_overrides_ignore_modules() { + if otp_supported_by_eqwalizer() { + simple_snapshot_expect_error( + args_vec!["eqwalize", "--bail-on-error", "app_b"], + "eqwalizer_ignore_modules", + expect_file!( + "../resources/test/eqwalizer_ignore_modules/eqwalize_bail_on_error_failure.pretty" + ), + true, + None, + ); + } + } + + #[test] + fn eqwalize_all_ignore_modules_success() { + if otp_supported_by_eqwalizer() { + simple_snapshot( + args_vec!["eqwalize-all", "--bail-on-error"], + "eqwalizer_ignore_modules", + expect_file!( + "../resources/test/eqwalizer_ignore_modules/eqwalize_all_bail_on_error_success.pretty" + ), + true, + None, + ); + } + } + + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn eqwalize_with_color_vs_no_color(buck: bool) { + if otp_supported_by_eqwalizer() { + // Test with color (default) + let (mut args_color, _path) = + add_project(args_vec!["eqwalize", "app_a"], "standard", None, None); + if !buck { + args_color.push("--rebar".into()); + } + + // Test without color + let (mut args_no_color, _) = add_project( + args_vec!["--color", "never", "eqwalize", "app_a"], + "standard", + None, + None, + ); + if !buck { + args_no_color.push("--rebar".into()); + } + + let (stdout_color, stderr_color, code_color) = elp(args_color); + let (stdout_no_color, stderr_no_color, code_no_color) = elp(args_no_color); + + // Both should have same exit code + assert_eq!(code_color, code_no_color); + + // Both should have same stderr behavior + if code_color == 0 { + assert!(stderr_color.is_empty()); + assert!(stderr_no_color.is_empty()); + } + + // The content should be similar but no-color version should not contain ANSI escape codes + // ANSI color codes typically start with \x1b[ or \u{1b}[ + let _has_ansi_color = stdout_color.contains('\x1b'); + let has_ansi_no_color = stdout_no_color.contains('\x1b'); + + // With --color never, there should be no ANSI escape sequences + assert!( + !has_ansi_no_color, + "Output with --color never should not contain ANSI escape codes" + ); + + // The outputs should be functionally equivalent when ANSI codes are stripped + let stripped_color = strip_ansi_codes(&stdout_color); + assert_eq!( + stripped_color, stdout_no_color, + "Content should be identical after stripping ANSI codes" + ); + } + } + + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn eqwalize_with_no_color_env_var(buck: bool) { + if otp_supported_by_eqwalizer() { + // Test with NO_COLOR environment variable set + unsafe { + env::set_var("NO_COLOR", "1"); + } + + let (mut args_no_color_env, _) = + add_project(args_vec!["eqwalize", "app_a"], "standard", None, None); + if !buck { + args_no_color_env.push("--rebar".into()); + } + + let (stdout_no_color_env, stderr_no_color_env, code_no_color_env) = + 
elp(args_no_color_env); + + // Clean up environment variable + unsafe { + env::remove_var("NO_COLOR"); + } + + // Test with normal color (for comparison) + let (mut args_color, _) = + add_project(args_vec!["eqwalize", "app_a"], "standard", None, None); + if !buck { + args_color.push("--rebar".into()); + } + + let (stdout_color, stderr_color, code_color) = elp(args_color); + + // Both should have same exit code + assert_eq!(code_color, code_no_color_env); + + // Both should have same stderr behavior + if code_color == 0 { + assert!(stderr_color.is_empty()); + assert!(stderr_no_color_env.is_empty()); + } + + // The NO_COLOR env var version should not contain ANSI escape codes + let has_ansi_no_color_env = stdout_no_color_env.contains('\x1b'); + assert!( + !has_ansi_no_color_env, + "Output with NO_COLOR env var should not contain ANSI escape codes" + ); + + // The outputs should be functionally equivalent when ANSI codes are stripped + let stripped_color = strip_ansi_codes(&stdout_color); + assert_eq!( + stripped_color, stdout_no_color_env, + "Content should be identical after stripping ANSI codes" + ); + } + } + // ----------------------------------------------------------------- #[test] @@ -2042,6 +2607,26 @@ mod tests { expected.assert_eq(&stdout); } + #[test] + fn ssr_help() { + let args = args::args() + .run_inner(Args::from(&["ssr", "--help"])) + .unwrap_err(); + let expected = expect_file!["../resources/test/ssr_help.stdout"]; + let stdout = args.unwrap_stdout(); + expected.assert_eq(&stdout); + } + + #[test] + fn search_help() { + let args = args::args() + .run_inner(Args::from(&["search", "--help"])) + .unwrap_err(); + let expected = expect_file!["../resources/test/ssr_help.stdout"]; + let stdout = args.unwrap_stdout(); + expected.assert_eq(&stdout); + } + #[test] fn build_info_help() { let args = args::args() @@ -2150,6 +2735,32 @@ mod tests { } } + #[test_case(false ; "rebar")] + #[test_case(true ; "buck")] + fn include_lib_non_dependency_fails(buck: bool) { + if buck { + simple_snapshot_expect_error( + args_vec!["parse-elp", "--module", "main_app"], + "include_lib_dependency_test", + expect_file!( + "../resources/test/include_lib_dependency_test/include_lib_non_dependency_fails.stdout" + ), + buck, + None, + ); + } else { + simple_snapshot_expect_error( + args_vec!["parse-elp", "--module", "main_app"], + "include_lib_dependency_test", + expect_file!( + "../resources/test/include_lib_dependency_test/include_lib_non_dependency_rebar.stdout" + ), + buck, + None, + ); + }; + } + #[track_caller] fn simple_snapshot( args: Vec<OsString>, @@ -2179,15 +2790,13 @@ let (stdout, stderr, code) = elp(args); assert_eq!( code, expected_code, - "failed with unexpected exit code: got {} not {}\nstdout:\n{}\nstderr:\n{}", - code, expected_code, stdout, stderr + "failed with unexpected exit code: got {code} not {expected_code}\nstdout:\n{stdout}\nstderr:\n{stderr}" ); assert_normalised_file(expected, &stdout, path, false); if expected_code == 0 { assert!( stderr.is_empty(), - "expected stderr to be empty, got:\n{}", - stderr + "expected stderr to be empty, got:\n{stderr}" ) } } @@ -2203,16 +2812,14 @@ let (stdout, stderr, code) = elp(args); assert_eq!( code, expected_code, - "failed with unexpected exit code: got {} not {}\nstdout:\n{}\nstderr:\n{}", - code, expected_code, stdout, stderr + "failed with unexpected exit code: got {code} not {expected_code}\nstdout:\n{stdout}\nstderr:\n{stderr}" ); let path = PathBuf::from(""); assert_normalised_file(expected, &stdout, path, false); if
expected_code == 0 { assert!( stderr.is_empty(), - "expected stderr to be empty, got:\n{}", - stderr + "expected stderr to be empty, got:\n{stderr}" ) } } @@ -2232,13 +2839,67 @@ let (stdout, stderr, code) = elp(args); assert_eq!( code, 101, - "Expected exit code 101, got: {}\nstdout:\n{}\nstderr:\n{}", - code, stdout, stderr + "Expected exit code 101, got: {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" ); assert_normalised_file(expected, &stdout, path, false); } } + fn simple_snapshot_expect_error_sorted( + args: Vec<OsString>, + project: &str, + expected: ExpectFile, + buck: bool, + file: Option<&str>, + ) { + if !buck || cfg!(feature = "buck") { + let (mut args, path) = add_project(args, project, file, None); + if !buck { + args.push("--rebar".into()); + } + let (stdout, stderr, code) = elp(args); + assert_eq!( + code, 101, + "Expected exit code 101, got: {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" + ); + let sorted_stdout = sort_lines(&stdout); + assert_normalised_file(expected, &sorted_stdout, path, false); + } + } + + fn sort_lines(s: &str) -> String { + let mut lines: Vec<&str> = s.lines().collect(); + lines.sort(); + lines.join("\n") + } + + #[track_caller] + fn simple_snapshot_sorted( + args: Vec<OsString>, + project: &str, + expected: ExpectFile, + buck: bool, + file: Option<&str>, + ) { + if !buck || cfg!(feature = "buck") { + let (mut args, path) = add_project(args, project, file, None); + if !buck { + args.push("--rebar".into()); + } + let (stdout, stderr, code) = elp(args); + assert_eq!( + code, 0, + "failed with unexpected exit code: got {code} not 0\nstdout:\n{stdout}\nstderr:\n{stderr}" + ); + let sorted_stdout = sort_lines(&stdout); + assert_normalised_file(expected, &sorted_stdout, path, false); + assert!( + stderr.is_empty(), + "expected stderr to be empty, got:\n{stderr}" + ) + } + } + fn simple_snapshot_expect_stderror( args: Vec<OsString>, project: &str, @@ -2255,13 +2916,47 @@ let (stdout, stderr, code) = elp(args); assert_eq!( code, 101, - "Expected exit code 101, got: {}\nstdout:\n{}\nstderr:\n{}", - code, stdout, stderr + "Expected exit code 101, got: {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" ); assert_normalised_file(expected, &stderr, path, normalise_urls); } } + #[track_caller] + fn simple_snapshot_output_contains( + args: Vec<OsString>, + project: &str, + expected_patterns: &[&str], + buck: bool, + file: Option<&str>, + expected_code: i32, + ) { + if !buck || cfg!(feature = "buck") { + let (mut args, _path) = add_project(args, project, file, None); + if !buck { + args.push("--rebar".into()); + } + let (stdout, stderr, code) = elp(args); + assert_eq!( + code, expected_code, + "Expected exit code {expected_code}, got: {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" + ); + + if expected_code == 0 { + assert!(stderr.is_empty(), "Expected empty stderr, got:\n{stderr}"); + } + + for pattern in expected_patterns { + assert!( + stdout.contains(pattern), + "Expected stdout to contain '{}', but got:\n{}", + pattern, + stdout + ); + } + } + } + #[allow(clippy::too_many_arguments)] fn check_lint_fix( args: Vec<OsString>, @@ -2319,11 +3014,12 @@ let (stdout, stderr, code) = elp(args); assert_eq!( code, expected_code, - "Expected exit code {expected_code}, got: {}\nstdout:\n{}\nstderr:\n{}", - code, stdout, stderr + "Expected exit code {expected_code}, got: {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" ); if let Some(expected_stderr) = expected_stderr { expected_stderr.assert_eq(&stderr); + } else { + expect![[""]].assert_eq(&stderr); } assert_normalised_file(expected, &stdout, path,
false); for (expected_file, file) in files { @@ -2337,6 +3033,55 @@ Ok(()) } + #[allow(clippy::too_many_arguments)] + fn check_lint_fix_stderr_sorted( + args: Vec<OsString>, + project: &str, + expected: ExpectFile, + expected_code: i32, + buck: bool, + file: Option<&str>, + actual_dir: &Path, + expected_dir: &Path, + files: &[(&str, &str)], + backup_files: bool, + expected_stderr: Option<Expect>, + ) -> Result<()> { + if !buck || cfg!(feature = "buck") { + let (mut args, path) = add_project(args, project, file, None); + if !buck { + args.push("--rebar".into()); + } + let orig_files = files.iter().map(|x| x.0).collect::<Vec<_>>(); + // Take a backup. The Drop instance will restore at the end + let _backup = if backup_files { + BackupFiles::save_files(project, &orig_files) + } else { + BackupFiles::save_files(project, &[]) + }; + let (stdout, stderr, code) = elp(args); + assert_eq!( + code, expected_code, + "Expected exit code {expected_code}, got: {code}\nstdout:\n{stdout}\nstderr:\n{stderr}" + ); + if let Some(expected_stderr) = expected_stderr { + expected_stderr.assert_eq(&stderr); + } else { + expect![[""]].assert_eq(&stderr); + } + let sorted_stdout = sort_lines(&stdout); + assert_normalised_file(expected, &sorted_stdout, path, false); + for (expected_file, file) in files { + let expected = expect_file!(expected_dir.join(expected_file)); + let actual = actual_dir.join(file); + assert!(actual.exists()); + let content = fs::read_to_string(actual).unwrap(); + expected.assert_eq(content.as_str()); + } + } + Ok(()) + } + fn assert_normalised_file( expected: ExpectFile, actual: &str, @@ -2377,7 +3122,7 @@ let project_path: PathBuf = path_str.clone().into(); args.push("--project".into()); if let Some(json_file) = json { - let full_file = format!("{}/{}", path_str, json_file); + let full_file = format!("{path_str}/{json_file}"); args.push(full_file.into()); } else { args.push(path_str.into()); @@ -2391,7 +3136,14 @@ } fn project_path(project: &str) -> String { - format!("../../test_projects/{}", project) + format!("../../test/test_projects/{project}") + } + + fn strip_ansi_codes(s: &str) -> String { + lazy_static!
{ + static ref ANSI_RE: Regex = Regex::new(r"\x1b\[[0-9;]*m").unwrap(); + } + ANSI_RE.replace_all(s, "").to_string() } struct BackupFiles { diff --git a/crates/elp/src/bin/reporting.rs b/crates/elp/src/bin/reporting.rs index 7b447d2bfd..7ab10459fe 100644 --- a/crates/elp/src/bin/reporting.rs +++ b/crates/elp/src/bin/reporting.rs @@ -29,6 +29,7 @@ use elp::cli::Cli; use elp::convert; use elp::memory_usage::MemoryUsage; use elp_ide::Analysis; +use elp_ide::AnalysisHost; use elp_ide::TextRange; use elp_ide::elp_ide_db::EqwalizerDiagnostic; use elp_ide::elp_ide_db::elp_base_db::AbsPath; @@ -38,6 +39,7 @@ use indicatif::ProgressBar; use itertools::Itertools; use lazy_static::lazy_static; use parking_lot::Mutex; +use vfs::Vfs; pub trait Reporter { fn write_eqwalizer_diagnostics( @@ -117,7 +119,7 @@ impl Reporter for PrettyReporter<'_> { let range: Range<usize> = diagnostic.range.start().into()..diagnostic.range.end().into(); let expr = match &diagnostic.expression { - Some(s) => format!("{}.\n", s), + Some(s) => format!("{s}.\n"), None => "".to_string(), }; @@ -127,7 +129,7 @@ let mut labels = vec![msg_label]; if let Some(s) = &diagnostic.explanation { let explanation_label = - Label::secondary(reporting_id, range).with_message(format!("\n\n{}", s)); + Label::secondary(reporting_id, range).with_message(format!("\n\n{s}")); labels.push(explanation_label); }; let d: ReportingDiagnostic = ReportingDiagnostic::error() @@ -188,7 +190,7 @@ let duration = self.start.elapsed().as_secs(); self.cli.set_color(&YELLOW_COLOR_SPEC)?; if count == total { - write!(self.cli, "eqWAlized {} module(s) in {}s", count, duration)?; + write!(self.cli, "eqWAlized {count} module(s) in {duration}s")?; } else { write!( self.cli, @@ -225,8 +227,6 @@ impl Reporter for JsonReporter<'_> { diagnostics: &[EqwalizerDiagnostic], ) -> Result<()> { let line_index = self.analysis.line_index(file_id)?; - // Pass include_Tests = false so that errors for tests files that are not opted-in are tagged as arc_types::Severity::Disabled - let eqwalizer_enabled = self.analysis.is_eqwalizer_enabled(file_id, false).unwrap(); let file_path = &self.loaded.vfs.file_path(file_id); let root_path = &self .analysis @@ -235,14 +235,10 @@ .root_dir; let relative_path = get_relative_path(root_path, file_path); for diagnostic in diagnostics { - let diagnostic = convert::eqwalizer_to_arc_diagnostic( - diagnostic, - &line_index, - relative_path, - eqwalizer_enabled, - ); + let diagnostic = + convert::eqwalizer_to_arc_diagnostic(diagnostic, &line_index, relative_path); let diagnostic = serde_json::to_string(&diagnostic)?; - writeln!(self.cli, "{}", diagnostic)?; + writeln!(self.cli, "{diagnostic}")?; } Ok(()) } @@ -258,9 +254,10 @@ "ELP".to_string(), diagnostic.msg.clone(), None, + None, ); let diagnostic = serde_json::to_string(&diagnostic)?; - writeln!(self.cli, "{}", diagnostic)?; + writeln!(self.cli, "{diagnostic}")?; } Ok(()) } @@ -281,9 +278,10 @@ "ELP".to_string(), description, None, + None, ); let diagnostic = serde_json::to_string(&diagnostic)?; - writeln!(self.cli, "{}", diagnostic)?; + writeln!(self.cli, "{diagnostic}")?; Ok(()) } @@ -357,12 +355,12 @@ pub(crate) fn dump_stats(cli: &mut dyn Cli, list_modules: bool) { if list_modules { writeln!(cli, "--------------start of modules----------").ok(); stats.iter().sorted().for_each(|stat| { - writeln!(cli, "{}", stat).ok();
+ writeln!(cli, "{stat}").ok(); }); } writeln!(cli, "{} modules processed", stats.len()).ok(); let mem_usage = MemoryUsage::now(); - writeln!(cli, "{}", mem_usage).ok(); + writeln!(cli, "{mem_usage}").ok(); } lazy_static! { @@ -376,3 +374,28 @@ pub(crate) fn add_stat(stat: String) { let mut stats = STATS.lock(); stats.push(stat); } + +pub(crate) fn print_memory_usage( + mut host: AnalysisHost, + vfs: Vfs, + cli: &mut dyn Cli, +) -> Result<()> { + let mem = host.per_query_memory_usage(); + + let before = profile::memory_usage(); + drop(vfs); + let vfs = before.allocated - profile::memory_usage().allocated; + + let before = profile::memory_usage(); + drop(host); + let unaccounted = before.allocated - profile::memory_usage().allocated; + let remaining = profile::memory_usage().allocated; + + for (name, bytes, entries) in mem { + writeln!(cli, "{bytes:>8} {entries:>6} {name}")?; + } + writeln!(cli, "{vfs:>8} VFS")?; + writeln!(cli, "{unaccounted:>8} Unaccounted")?; + writeln!(cli, "{remaining:>8} Remaining")?; + Ok(()) +} diff --git a/crates/elp/src/bin/shell.rs b/crates/elp/src/bin/shell.rs index b00ce58fc1..13ff79ed36 100644 --- a/crates/elp/src/bin/shell.rs +++ b/crates/elp/src/bin/shell.rs @@ -15,6 +15,7 @@ use std::path::Path; use std::path::PathBuf; use std::process::Command; use std::sync::Arc; +use std::time::SystemTime; use anyhow::Result; use elp::build::load; @@ -29,6 +30,7 @@ use elp_ide::elp_ide_db::elp_base_db::SourceDatabaseExt; use elp_ide::elp_ide_db::elp_base_db::SourceRoot; use elp_ide::elp_ide_db::elp_base_db::SourceRootId; use elp_ide::elp_ide_db::elp_base_db::VfsPath; +use elp_log::telemetry; use elp_project_model::DiscoverConfig; use elp_project_model::buck::BuckQueryConfig; use paths::Utf8PathBuf; @@ -84,7 +86,7 @@ impl Watchman { } fn get_changes(&self, from: &WatchmanClock, patterns: Vec<&str>) -> Result { - let mut cmd = Command::new("watchman"); + let mut cmd = Self::cmd(); cmd.arg("since"); cmd.arg(self.watch.as_os_str()); cmd.arg(&from.clock); @@ -103,14 +105,14 @@ enum ShellError { impl fmt::Display for ShellError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - ShellError::UnexpectedCommand(cmd) => write!(f, "Unexpected command {}", cmd), + ShellError::UnexpectedCommand(cmd) => write!(f, "Unexpected command {cmd}"), ShellError::UnexpectedOption(cmd, arg) => { - write!(f, "Unexpected option {} for command {}", arg, cmd) + write!(f, "Unexpected option {arg} for command {cmd}") } ShellError::UnexpectedArg(cmd, arg) => { - write!(f, "Unexpected arg {} for command {}", arg, cmd) + write!(f, "Unexpected arg {arg} for command {cmd}") } - ShellError::MissingArg(cmd) => write!(f, "Missing arg for command {}", cmd), + ShellError::MissingArg(cmd) => write!(f, "Missing arg for command {cmd}"), } } } @@ -155,10 +157,9 @@ impl ShellCommand { } "eqwalize-app" => { let include_generated = options.contains(&"--include-generated"); - let include_tests = options.contains(&"--include-tests"); if let Some(other) = options .into_iter() - .find(|&opt| opt != "--include-generated" && opt != "--include-tests") + .find(|&opt| opt != "--include-generated") { return Err(ShellError::UnexpectedOption( "eqwalize-app".into(), @@ -175,7 +176,6 @@ impl ShellCommand { rebar, app: app.into(), include_generated, - include_tests, bail_on_error: false, }))); } @@ -183,10 +183,9 @@ impl ShellCommand { } "eqwalize-all" => { let include_generated = options.contains(&"--include-generated"); - let include_tests = options.contains(&"--include-tests"); if let Some(other) = options 
.into_iter() - .find(|&opt| opt != "--include-generated" && opt != "--include-tests") + .find(|&opt| opt != "--include-generated") { return Err(ShellError::UnexpectedOption( "eqwalize-all".into(), @@ -202,7 +201,6 @@ impl ShellCommand { rebar, format: None, include_generated, - include_tests, bail_on_error: false, stats: false, list_modules: false, @@ -224,10 +222,8 @@ COMMANDS: eqwalize Eqwalize specified modules --clause-coverage Use experimental clause coverage checker eqwalize-all Eqwalize all modules in the current project - --include-tests Also eqwalize test modules from project --clause-coverage Use experimental clause coverage checker eqwalize-app Eqwalize all modules in specified application - --include-tests Also eqwalize test modules from project --clause-coverage Use experimental clause coverage checker "; @@ -322,7 +318,7 @@ fn update_changes( vfs.set_file_contents(vfs_path, None); } else { let contents = - fs::read(&path).unwrap_or_else(|_| panic!("Cannot read created file {:?}", path)); + fs::read(&path).unwrap_or_else(|_| panic!("Cannot read created file {path:?}")); vfs.set_file_contents(vfs_path, Some(contents)); } }); @@ -331,6 +327,12 @@ fn update_changes( } pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfig) -> Result<()> { + let start_time = SystemTime::now(); + let mut cmd = Command::new("watchman"); + let _ = cmd.arg("--version").output().map_err(|_| { + anyhow::Error::msg("`watchman` command not found. install it from https://facebook.github.io/watchman/ to use `elp shell`.") + })?; + let watchman = Watchman::new(&shell.project) .map_err(|_err| anyhow::Error::msg( "Could not find project. Are you in an Erlang project directory, or is one specified using --project?" @@ -344,9 +346,10 @@ pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfi Mode::Shell, query_config, )?; + telemetry::report_elapsed_time("shell operational", start_time); let mut rl = rustyline::DefaultEditor::new()?; let mut last_read = watchman.get_clock()?; - write!(cli, "{}", WELCOME)?; + write!(cli, "{WELCOME}")?; loop { let readline = rl.readline("> "); match readline { @@ -368,21 +371,21 @@ pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfi last_read = update_changes(&mut loaded, &watchman, &last_read)?; match ShellCommand::parse(shell, line) { Ok(None) => (), - Ok(Some(ShellCommand::Help)) => write!(cli, "{}", HELP)?, + Ok(Some(ShellCommand::Help)) => write!(cli, "{HELP}")?, Ok(Some(ShellCommand::Quit)) => break, Ok(Some(ShellCommand::ShellEqwalize(eqwalize))) => { eqwalizer_cli::do_eqwalize_module(&eqwalize, &mut loaded, cli) - .or_else(|e| writeln!(cli, "Error: {}", e))?; + .or_else(|e| writeln!(cli, "Error: {e}"))?; } Ok(Some(ShellCommand::ShellEqwalizeApp(eqwalize_app))) => { eqwalizer_cli::do_eqwalize_app(&eqwalize_app, &mut loaded, cli) - .or_else(|e| writeln!(cli, "Error: {}", e))?; + .or_else(|e| writeln!(cli, "Error: {e}"))?; } Ok(Some(ShellCommand::ShellEqwalizeAll(eqwalize_all))) => { eqwalizer_cli::do_eqwalize_all(&eqwalize_all, &mut loaded, cli) - .or_else(|e| writeln!(cli, "Error: {}", e))?; + .or_else(|e| writeln!(cli, "Error: {e}"))?; } - Err(err) => write!(cli, "{}\n{}", err, HELP)?, + Err(err) => write!(cli, "{err}\n{HELP}")?, } } Err(ReadlineError::Interrupted) => { @@ -393,10 +396,11 @@ pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfi break; } Err(err) => { - writeln!(cli, "Error: {:?}", err)?; + writeln!(cli, "Error: {err:?}")?; break; } } } + 
telemetry::report_elapsed_time("shell done", start_time); Ok(()) } diff --git a/crates/elp/src/bin/ssr_cli.rs b/crates/elp/src/bin/ssr_cli.rs new file mode 100644 index 0000000000..36f26d4554 --- /dev/null +++ b/crates/elp/src/bin/ssr_cli.rs @@ -0,0 +1,717 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is dual-licensed under either the MIT license found in the + * LICENSE-MIT file in the root directory of this source tree or the Apache + * License, Version 2.0 found in the LICENSE-APACHE file in the root directory + * of this source tree. You may select, at your option, one of the + * above-listed licenses. + */ + +use std::fs; +use std::path::Path; +use std::str; +use std::thread; +use std::time::SystemTime; + +use anyhow::Result; +use anyhow::bail; +use crossbeam_channel::unbounded; +use elp::build::load; +use elp::build::types::LoadResult; +use elp::cli::Cli; +use elp::convert; +use elp::memory_usage::MemoryUsage; +use elp::otp_file_to_ignore; +use elp_eqwalizer::Mode; +use elp_ide::Analysis; +use elp_ide::AnalysisHost; +use elp_ide::diagnostics; +use elp_ide::diagnostics::DiagnosticsConfig; +use elp_ide::diagnostics::FallBackToAll; +use elp_ide::diagnostics::LintConfig; +use elp_ide::diagnostics::LintsFromConfig; +use elp_ide::diagnostics::MatchSsr; +use elp_ide::elp_ide_db::LineCol; +use elp_ide::elp_ide_db::elp_base_db::AbsPath; +use elp_ide::elp_ide_db::elp_base_db::FileId; +use elp_ide::elp_ide_db::elp_base_db::IncludeOtp; +use elp_ide::elp_ide_db::elp_base_db::ModuleName; +use elp_ide::elp_ide_db::elp_base_db::ProjectId; +use elp_ide::elp_ide_db::elp_base_db::VfsPath; +use elp_log::telemetry; +use elp_project_model::AppName; +use elp_project_model::AppType; +use elp_project_model::DiscoverConfig; +use elp_project_model::buck::BuckQueryConfig; +use hir::Semantic; +use paths::Utf8PathBuf; +use rayon::prelude::ParallelBridge; +use rayon::prelude::ParallelIterator; + +use crate::args::Ssr; +use crate::reporting; +use crate::reporting::print_memory_usage; + +fn normalize_ssr_pattern(pattern: &str) -> String { + if pattern.starts_with("ssr:") { + pattern.to_string() + } else { + format!("ssr: {}.", pattern) + } +} + +pub fn run_ssr_command( + args: &Ssr, + cli: &mut dyn Cli, + query_config: &BuckQueryConfig, + use_color: bool, +) -> Result<()> { + let start_time = SystemTime::now(); + let memory_start = MemoryUsage::now(); + + // Validate all SSR patterns early + let analysis_host = AnalysisHost::default(); + let analysis = analysis_host.analysis(); + for pattern in &args.ssr_specs { + let normalized_pattern = normalize_ssr_pattern(pattern); + match analysis.validate_ssr_pattern(&normalized_pattern) { + Ok(Ok(())) => {} + Ok(Err(e)) => bail!("invalid SSR pattern '{}': {}", pattern, e), + Err(_cancelled) => bail!("SSR pattern validation was cancelled"), + } + } + + // Parse the strategy from CLI arguments + let strategy = args.parse_strategy()?; + + // Create the lint config with all SSR patterns + let mut lint_config = LintConfig::default(); + for pattern in &args.ssr_specs { + let normalized_pattern = normalize_ssr_pattern(pattern); + let severity = if args.dump_config { + // Set the severity so that squiggles are shown in the VS Code UI + Some(diagnostics::Severity::Information) + } else { + None + }; + let ssr_lint = diagnostics::Lint::LintMatchSsr(MatchSsr { + ssr_pattern: normalized_pattern, + message: None, + strategy: Some(strategy), + severity, + }); + lint_config.ad_hoc_lints.lints.push(ssr_lint); + } + + // Build the diagnostics config + 
let diagnostics_config = DiagnosticsConfig::default() + .configure_diagnostics( + &lint_config, + &Some("ad-hoc: ssr-match".to_string()), + &None, + FallBackToAll::Yes, + )? + .set_include_generated(args.include_generated) + .set_experimental(false) + .set_use_cli_severity(false); + + if args.dump_config { + let result = toml::to_string::<LintsFromConfig>(&diagnostics_config.lints_from_config)?; + // This is a subsection of .elp_lint.toml, add subsection prefix + let result = result.replace("[[lints]]", "[[ad_hoc_lints.lints]]"); + writeln!(cli, "\n# Add this to your .elp_lint.toml")?; + writeln!(cli, "{}", result)?; + return Ok(()); + } + + // Load the project + let mut loaded = load_project(args, cli, query_config)?; + telemetry::report_elapsed_time("ssr operational", start_time); + + let r = run_ssr(cli, &mut loaded, &diagnostics_config, args, use_color); + + telemetry::report_elapsed_time("ssr done", start_time); + + let memory_end = MemoryUsage::now(); + let memory_used = memory_end - memory_start; + + // Print memory usage at the end if requested and format is normal + if args.is_format_normal() && args.report_system_stats { + print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?; + writeln!(cli, "{}", memory_used)?; + } + r +} + +pub fn run_ssr( + cli: &mut dyn Cli, + loaded: &mut LoadResult, + diagnostics_config: &DiagnosticsConfig, + args: &Ssr, + use_color: bool, +) -> Result<()> { + let analysis = loaded.analysis(); + let (file_id, name) = match &args.module { + Some(module) => match analysis.module_file_id(loaded.project_id, module)? { + Some(file_id) => { + if args.is_format_normal() { + writeln!(cli, "module specified: {module}")?; + } + (Some(file_id), analysis.module_name(file_id)?) + } + None => panic!("Module not found: {module}"), + }, + None => match &args.file { + Some(file_name) => { + if args.is_format_normal() { + writeln!(cli, "file specified: {file_name}")?; + } + let path_buf = Utf8PathBuf::from_path_buf(fs::canonicalize(file_name).unwrap()) + .expect("UTF8 conversion failed"); + let path = AbsPath::assert(&path_buf); + let path = path.as_os_str().to_str().unwrap(); + ( + loaded + .vfs + .file_id(&VfsPath::new_real_path(path.to_string())) + .map(|(id, _)| id), + path_buf.as_path().file_name().map(ModuleName::new), + ) + } + None => (None, None), + }, + }; + + let mut match_count = 0; + + match (file_id, name) { + (None, _) => { + // Streaming case: process all modules + let project_id = loaded.project_id; + do_parse_all_streaming( + cli, + &analysis, + &project_id, + diagnostics_config, + args, + use_color, + loaded, + &mut match_count, + )?; + } + (Some(file_id), Some(name)) => { + if let Some(app) = &args.app + && let Ok(Some(file_app)) = analysis.file_app_name(file_id) + && file_app != AppName(app.to_string()) + { + panic!("Module {} does not belong to app {}", name.as_str(), app) + } + if let Some(diag) = do_parse_one(&analysis, diagnostics_config, file_id, &name, args)?
{ + match_count = 1; + print_single_result(cli, loaded, &diag, args, use_color)?; + } + } + (Some(file_id), _) => { + panic!("Could not get name from file_id for {file_id:?}") + } + }; + + if match_count == 0 { + if args.is_format_normal() { + writeln!(cli, "No matches found")?; + } + } else if args.is_format_normal() { + writeln!(cli, "\nMatches found in {} modules", match_count)?; + } + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +fn do_parse_all_streaming( + cli: &mut dyn Cli, + analysis: &Analysis, + project_id: &ProjectId, + config: &DiagnosticsConfig, + args: &Ssr, + use_color: bool, + loaded: &mut LoadResult, + match_count: &mut usize, +) -> Result<()> { + let module_index = analysis.module_index(*project_id).unwrap(); + let app_name = args.app.as_ref().map(|name| AppName(name.to_string())); + + // Create a channel for streaming results + let (tx, rx) = unbounded(); + + // Spawn a thread to process modules in parallel and send results + let analysis_clone = analysis.clone(); + let config_clone = config.clone(); + let args_clone = args.clone(); + + // Collect modules into an owned vector + let modules: Vec<_> = module_index + .iter_own() + .map(|(name, source, file_id)| (name.as_str().to_string(), source, file_id)) + .collect(); + + thread::spawn(move || { + modules + .into_iter() + .par_bridge() + .map_with( + (analysis_clone, tx), + |(db, tx), (module_name, _file_source, file_id)| { + if !otp_file_to_ignore(db, file_id) + && db.file_app_type(file_id).ok() != Some(Some(AppType::Dep)) + && (app_name.is_none() + || db.file_app_name(file_id).ok().as_ref() == Some(&app_name)) + && let Ok(Some(result)) = + do_parse_one(db, &config_clone, file_id, &module_name, &args_clone) + { + // Send result through channel + let _ = tx.send(result); + } + }, + ) + .for_each(|_| {}); // Consume the iterator + // Channel is dropped here, signaling end of results + }); + + // Process and print results as they arrive from the channel + for result in rx { + *match_count += 1; + print_single_result(cli, loaded, &result, args, use_color)?; + } + + Ok(()) +} + +fn print_single_result( + cli: &mut dyn Cli, + loaded: &mut LoadResult, + result: &(String, FileId, Vec<diagnostics::Diagnostic>), + args: &Ssr, + use_color: bool, +) -> Result<()> { + let (name, file_id, diags) = result; + + if args.is_format_json() { + for diag in diags { + let vfs_path = loaded.vfs.file_path(*file_id); + let analysis = loaded.analysis(); + let root_path = &analysis + .project_data(*file_id) + .unwrap_or_else(|_err| panic!("could not find project data")) + .unwrap_or_else(|| panic!("could not find project data")) + .root_dir; + let relative_path = reporting::get_relative_path(root_path, vfs_path); + print_diagnostic_json(diag, &analysis, *file_id, relative_path, false, cli)?; + } + } else { + writeln!(cli, " {}: {}", name, diags.len())?; + + // Determine if we should show source context + let show_source = args.show_source + || args.before_context.is_some() + || args.after_context.is_some() + || args.context.is_some() + || args.group_separator.is_some() + || args.no_group_separator; + let (before_lines, after_lines) = calculate_context_lines(args); + let has_context = before_lines > 0 || after_lines > 0; + let group_separator = should_show_group_separator(args, has_context && show_source); + + for (idx, diag) in diags.iter().enumerate() { + // Print group separator before each match (except the first) if showing source with context + if show_source + && idx > 0 + && let Some(ref sep) = group_separator + { + writeln!(cli, "{}", sep)?; + } + // Get
relative path for diagnostic output + let vfs_path = loaded.vfs.file_path(*file_id); + let analysis = loaded.analysis(); + let root_path = &analysis + .project_data(*file_id) + .unwrap_or_else(|_err| panic!("could not find project data")) + .unwrap_or_else(|| panic!("could not find project data")) + .root_dir; + let relative_path = reporting::get_relative_path(root_path, vfs_path); + + // Only show path when showing source context + let path_to_show = if show_source { + Some(relative_path) + } else { + None + }; + print_diagnostic(diag, &loaded.analysis(), *file_id, path_to_show, false, cli)?; + + // Only show source context if --show-source or --show-source-markers is set + if show_source { + if use_color { + print_source_with_context( + diag, + &loaded.analysis(), + *file_id, + before_lines, + after_lines, + true, + cli, + )?; + } else { + print_source_with_context_markers( + diag, + &loaded.analysis(), + *file_id, + before_lines, + after_lines, + cli, + )?; + } + writeln!(cli)?; + } + } + } + Ok(()) +} + +fn load_project( + args: &Ssr, + cli: &mut dyn Cli, + query_config: &BuckQueryConfig, +) -> Result<LoadResult> { + log::info!("Loading project at: {:?}", args.project); + let config = DiscoverConfig::new(args.rebar, &args.profile); + load::load_project_at( + cli, + &args.project, + config, + IncludeOtp::Yes, + Mode::Server, + query_config, + ) +} +fn do_parse_one( + db: &Analysis, + config: &DiagnosticsConfig, + file_id: FileId, + name: &str, + args: &Ssr, +) -> Result<Option<(String, FileId, Vec<diagnostics::Diagnostic>)>> { + if !args.include_generated && db.is_generated(file_id)? { + return Ok(None); + } + if !args.include_tests && db.is_test_suite_or_test_helper(file_id)?.unwrap_or(false) { + return Ok(None); + } + + // Run only the SSR lint configured in lints_from_config + let diagnostics = db.with_db(|database| { + let sema = Semantic::new(database); + let mut diags = Vec::new(); + config + .lints_from_config + .get_diagnostics(&mut diags, &sema, file_id); + diags + })?; + + if !diagnostics.is_empty() { + let res = (name.to_string(), file_id, diagnostics); + Ok(Some(res)) + } else { + Ok(None) + } +} + +fn print_diagnostic( + diag: &diagnostics::Diagnostic, + analysis: &Analysis, + file_id: FileId, + path: Option<&Path>, + use_cli_severity: bool, + cli: &mut dyn Cli, +) -> Result<(), anyhow::Error> { + let line_index = analysis.line_index(file_id)?; + let diag_str = diag.print(&line_index, use_cli_severity); + if let Some(path) = path { + writeln!(cli, "{}:{}", path.display(), diag_str)?; + } else { + writeln!(cli, " {}", diag_str)?; + } + Ok(()) +} + +fn print_diagnostic_json( + diagnostic: &diagnostics::Diagnostic, + analysis: &Analysis, + file_id: FileId, + path: &Path, + use_cli_severity: bool, + cli: &mut dyn Cli, +) -> Result<(), anyhow::Error> { + let line_index = analysis.line_index(file_id)?; + let converted_diagnostic = + convert::ide_to_arc_diagnostic(&line_index, path, diagnostic, use_cli_severity); + writeln!( + cli, + "{}", + serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| panic!( + "print_diagnostics_json failed for '{converted_diagnostic:?}': {err}" + )) + )?; + Ok(()) +} + +/// Print a line with color highlighting +fn print_line_with_color( + line_num: usize, + line_content: &str, + is_match_line: bool, + start: &LineCol, + end: &LineCol, + current_line: u32, + cli: &mut dyn Cli, +) -> Result<(), anyhow::Error> { + // Line number in gray + write!(cli, "\x1b[90m{:4} |\x1b[0m ", line_num)?; + + if !is_match_line { + // Non-match line: print normally + writeln!(cli, "{}", line_content)?; + } else { + // Match
line: highlight the matched portion + if current_line == start.line && current_line == end.line { + // Single-line match + let start_col = start.col_utf16 as usize; + let end_col = end.col_utf16 as usize; + + let before = &line_content[..start_col.min(line_content.len())]; + let matched = + &line_content[start_col.min(line_content.len())..end_col.min(line_content.len())]; + let after = &line_content[end_col.min(line_content.len())..]; + + write!(cli, "{}", before)?; + write!(cli, "\x1b[91;1m{}\x1b[0m", matched)?; // Red bold + writeln!(cli, "{}", after)?; + } else if current_line == start.line { + // First line of multi-line match + let start_col = start.col_utf16 as usize; + let before = &line_content[..start_col.min(line_content.len())]; + let matched = &line_content[start_col.min(line_content.len())..]; + + write!(cli, "{}", before)?; + writeln!(cli, "\x1b[91;1m{}\x1b[0m", matched)?; // Red bold + } else if current_line == end.line { + // Last line of multi-line match + let end_col = end.col_utf16 as usize; + let matched = &line_content[..end_col.min(line_content.len())]; + let after = &line_content[end_col.min(line_content.len())..]; + + write!(cli, "\x1b[91;1m{}\x1b[0m", matched)?; // Red bold + writeln!(cli, "{}", after)?; + } else { + // Middle line of multi-line match + writeln!(cli, "\x1b[91;1m{}\x1b[0m", line_content)?; // Red bold + } + } + + Ok(()) +} + +/// Calculate context lines from the new grep-style arguments +fn calculate_context_lines(args: &Ssr) -> (usize, usize) { + // -C/--context takes precedence and sets both before and after + if let Some(context) = args.context { + return (context, context); + } + + // Otherwise use individual before/after values, defaulting to 0 + let before = args.before_context.unwrap_or(0); + let after = args.after_context.unwrap_or(0); + (before, after) +} + +/// Determine if a group separator should be shown +fn should_show_group_separator(args: &Ssr, has_context: bool) -> Option<String> { + // If --no-group-separator is set, don't show separator + if args.no_group_separator { + return None; + } + + // Only show separators if there's context to separate + if !has_context { + return None; + } + + // Use custom separator if provided, otherwise default to "--" + Some( + args.group_separator + .clone() + .unwrap_or_else(|| "--".to_string()), + ) +} + +/// Print source code context with the specified before/after context lines +fn print_source_with_context( + diag: &diagnostics::Diagnostic, + analysis: &Analysis, + file_id: FileId, + before_lines: usize, + after_lines: usize, + use_color: bool, + cli: &mut dyn Cli, +) -> Result<(), anyhow::Error> { + let line_index = analysis.line_index(file_id)?; + let source = &analysis.file_text(file_id)?; + + let range = diag.range; + let start = line_index.line_col(range.start()); + let end = line_index.line_col(range.end()); + + let lines: Vec<&str> = source.lines().collect(); + let total_lines = lines.len(); + + // Calculate the range of lines to display + let first_line = start.line.saturating_sub(before_lines as u32) as usize; + let last_line = ((end.line + after_lines as u32 + 1) as usize).min(total_lines); + + // Display the source context + for line_idx in first_line..last_line { + let line_num = line_idx + 1; + let line_content = lines.get(line_idx).unwrap_or(&""); + + // Check if this line contains part of the match + let is_match_line = line_idx >= start.line as usize && line_idx <= end.line as usize; + + if use_color { + print_line_with_color( + line_num, + line_content, + is_match_line, + &start,
&end, + line_idx as u32, + cli, + )?; + } else { + // Just print the line without any highlighting + write!(cli, "{:4} | ", line_num)?; + writeln!(cli, "{}", line_content)?; + } + } + + Ok(()) +} + +/// Print source code context with text markers +fn print_source_with_context_markers( + diag: &diagnostics::Diagnostic, + analysis: &Analysis, + file_id: FileId, + before_lines: usize, + after_lines: usize, + cli: &mut dyn Cli, +) -> Result<(), anyhow::Error> { + let line_index = analysis.line_index(file_id)?; + let source = &analysis.file_text(file_id)?; + + let range = diag.range; + let start = line_index.line_col(range.start()); + let end = line_index.line_col(range.end()); + + let lines: Vec<&str> = source.lines().collect(); + let total_lines = lines.len(); + + // Calculate the range of lines to display + let first_line = start.line.saturating_sub(before_lines as u32) as usize; + let last_line = ((end.line + after_lines as u32 + 1) as usize).min(total_lines); + + // Display the source context + for line_idx in first_line..last_line { + let line_num = line_idx + 1; + let line_content = lines.get(line_idx).unwrap_or(&""); + + // Check if this line contains part of the match + let is_match_line = line_idx >= start.line as usize && line_idx <= end.line as usize; + + print_line_with_markers( + line_num, + line_content, + is_match_line, + &start, + &end, + line_idx as u32, + cli, + )?; + } + + Ok(()) +} + +/// Print a line with text markers (like diagnostic carets) +fn print_line_with_markers( + line_num: usize, + line_content: &str, + is_match_line: bool, + start: &LineCol, + end: &LineCol, + current_line: u32, + cli: &mut dyn Cli, +) -> Result<(), anyhow::Error> { + // Line number + write!(cli, "{:4} | ", line_num)?; + writeln!(cli, "{}", line_content)?; + + if is_match_line { + // Print marker line with ^^^ under the match + write!(cli, " | ")?; // Indent to match line content + + if current_line == start.line && current_line == end.line { + // Single-line match + let start_col = start.col_utf16 as usize; + let end_col = end.col_utf16 as usize; + let marker_len = (end_col - start_col).max(1); + + // Spaces before the marker + for _ in 0..start_col { + write!(cli, " ")?; + } + // Marker carets + for _ in 0..marker_len { + write!(cli, "^")?; + } + writeln!(cli)?; + } else if current_line == start.line { + // First line of multi-line match + let start_col = start.col_utf16 as usize; + let marker_len = line_content.len().saturating_sub(start_col).max(1); + + for _ in 0..start_col { + write!(cli, " ")?; + } + for _ in 0..marker_len { + write!(cli, "^")?; + } + writeln!(cli)?; + } else if current_line == end.line { + // Last line of multi-line match + let end_col = end.col_utf16 as usize; + + for _ in 0..end_col { + write!(cli, "^")?; + } + writeln!(cli)?; + } else { + // Middle line of multi-line match + for _ in 0..line_content.len() { + write!(cli, "^")?; + } + writeln!(cli)?; + } + } + + Ok(()) +} diff --git a/crates/elp/src/build/load.rs b/crates/elp/src/build/load.rs index 115f6f56c6..67457e049f 100644 --- a/crates/elp/src/build/load.rs +++ b/crates/elp/src/build/load.rs @@ -78,14 +78,9 @@ pub fn load_project_at( bail!("no projects") }; - log::info!("Discovered project: {:?}", manifest); + log::info!("Discovered project: {manifest:?}"); let pb = cli.spinner("Loading build info"); - let project = Project::load( - &manifest, - elp_config.eqwalizer.clone(), - query_config, - &|_progress| {}, - )?; + let project = Project::load(&manifest, &elp_config, query_config, &|_progress| {})?; pb.finish(); 
load_project(cli, project, include_otp, eqwalizer_mode) @@ -197,13 +192,13 @@ fn load_database( let changes = vfs.take_changes(); for (_file_id, file) in changes { - if file.exists() { - if let vfs::Change::Create(v, _) | vfs::Change::Modify(v, _) = file.change { - let document = Document::from_bytes(&v); - let (text, line_ending) = document.vfs_to_salsa(); - db.set_file_text(file.file_id, Arc::from(text)); - line_ending_map.insert(file.file_id, line_ending); - } + if file.exists() + && let vfs::Change::Create(v, _) | vfs::Change::Modify(v, _) = file.change + { + let document = Document::from_bytes(&v); + let (text, line_ending) = document.vfs_to_salsa(); + db.set_file_text(file.file_id, Arc::from(text)); + line_ending_map.insert(file.file_id, line_ending); } } diff --git a/crates/elp/src/cli.rs b/crates/elp/src/cli.rs index b3070768d6..0678aaebc1 100644 --- a/crates/elp/src/cli.rs +++ b/crates/elp/src/cli.rs @@ -30,18 +30,13 @@ pub trait Cli: Write + WriteColor { fn err(&mut self) -> &mut dyn Write; } -pub struct Real(StandardStream, Stderr); +pub struct StandardCli(StandardStream, Stderr); -impl Default for Real { - fn default() -> Self { - Self( - StandardStream::stdout(ColorChoice::Always), - std::io::stderr(), - ) +impl StandardCli { + fn new(color_choice: ColorChoice) -> Self { + Self(StandardStream::stdout(color_choice), std::io::stderr()) } -} -impl Real { fn progress_with_style( &self, len: u64, @@ -59,7 +54,7 @@ impl Real { } } -impl Cli for Real { +impl Cli for StandardCli { fn progress(&self, len: u64, prefix: &'static str) -> ProgressBar { self.progress_with_style(len, prefix, " {prefix:25!} {bar} {pos}/{len} {wide_msg}") } @@ -84,6 +79,63 @@ impl Cli for Real { } } +impl Write for StandardCli { + fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { + self.0.write(buf) + } + + fn flush(&mut self) -> std::io::Result<()> { + self.0.flush() + } +} + +impl WriteColor for StandardCli { + fn supports_color(&self) -> bool { + self.0.supports_color() + } + + fn set_color(&mut self, spec: &ColorSpec) -> std::io::Result<()> { + self.0.set_color(spec) + } + + fn reset(&mut self) -> std::io::Result<()> { + self.0.reset() + } +} + +pub struct Real(StandardCli); +pub struct NoColor(StandardCli); + +impl Default for Real { + fn default() -> Self { + Real(StandardCli::new(ColorChoice::Always)) + } +} + +impl Default for NoColor { + fn default() -> Self { + NoColor(StandardCli::new(ColorChoice::Never)) + } +} + +impl Cli for Real { + fn progress(&self, len: u64, prefix: &'static str) -> ProgressBar { + self.0.progress(len, prefix) + } + + fn simple_progress(&self, len: u64, prefix: &'static str) -> ProgressBar { + self.0.simple_progress(len, prefix) + } + + fn spinner(&self, prefix: &'static str) -> ProgressBar { + self.0.spinner(prefix) + } + + fn err(&mut self) -> &mut dyn Write { + self.0.err() + } +} + impl Write for Real { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { self.0.write(buf) @@ -108,6 +160,48 @@ impl WriteColor for Real { } } +impl Cli for NoColor { + fn progress(&self, len: u64, prefix: &'static str) -> ProgressBar { + self.0.progress(len, prefix) + } + + fn simple_progress(&self, len: u64, prefix: &'static str) -> ProgressBar { + self.0.simple_progress(len, prefix) + } + + fn spinner(&self, prefix: &'static str) -> ProgressBar { + self.0.spinner(prefix) + } + + fn err(&mut self) -> &mut dyn Write { + self.0.err() + } +} + +impl Write for NoColor { + fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { + self.0.write(buf) + } + + fn flush(&mut self) ->
std::io::Result<()> { + self.0.flush() + } +} + +impl WriteColor for NoColor { + fn supports_color(&self) -> bool { + self.0.supports_color() + } + + fn set_color(&mut self, spec: &ColorSpec) -> std::io::Result<()> { + self.0.set_color(spec) + } + + fn reset(&mut self) -> std::io::Result<()> { + self.0.reset() + } +} + pub struct Fake(Buffer, Vec<u8>); impl Default for Fake { diff --git a/crates/elp/src/config.rs b/crates/elp/src/config.rs index 598bea9bd1..681a022261 100644 --- a/crates/elp/src/config.rs +++ b/crates/elp/src/config.rs @@ -30,7 +30,7 @@ use serde::de::DeserializeOwned; use serde_json::json; use crate::from_json; -// @fb-only +// @fb-only: use crate::meta_only; // Defines the server-side configuration of ELP. We generate *parts* // of VS Code's `package.json` config from this. @@ -42,6 +42,8 @@ use crate::from_json; // `new_name | `old_name` so that we keep parsing the old name. config_data! { struct ConfigData { + /// Whether to use the experimental `buck2 targets` quick start process. + buck_quickStart: bool = json! { false }, /// Whether to show experimental ELP diagnostics that might /// have more false positives than usual. diagnostics_enableExperimental: bool = json! { false }, @@ -89,6 +91,12 @@ config_data! { /// Whether to show the `Link` lenses. Only applies when /// `#elp.lens.enable#` is set. lens_links_enable: bool = json! { false }, + /// Whether to enable LogView lens links. + lens_logview_links: bool = json! { false }, + /// Whether to enable Scuba lens links. + lens_scuba_links: bool = json! { false }, + /// Whether to enable WAM lens links. + lens_wam_links: bool = json! { false }, /// Configure LSP-based logging using env_logger syntax. log: String = json! { "error" }, /// Whether to show Signature Help. @@ -128,6 +136,9 @@ pub struct LensConfig { pub buck2_mode: Option<String>, pub debug: bool, pub links: bool, + pub logview_links: bool, + pub scuba_links: bool, + pub wam_links: bool, } #[derive(Clone, Debug, PartialEq, Eq)] @@ -143,12 +154,12 @@ pub struct EqwalizerConfig { } macro_rules! try_ { - ($expr:expr_2021) => { + ($expr:expr) => { || -> _ { Some($expr) }() }; } macro_rules!
try_or { - ($expr:expr_2021, $or:expr_2021) => { + ($expr:expr, $or:expr) => { try_!($expr).unwrap_or($or) }; } @@ -164,22 +175,22 @@ impl Config { } pub fn update(&mut self, json: serde_json::Value) { - log::info!("updating config from JSON: {:#}", json); + log::info!("updating config from JSON: {json:#}"); if json.is_null() || json.as_object().is_some_and(|it| it.is_empty()) { return; } self.data = ConfigData::from_json(json); - // @fb-only + // @fb-only: meta_only::harmonise_gks(self); } pub fn update_gks(&mut self, json: serde_json::Value) { - log::info!("updating gks from JSON: {:#}", json); + log::info!("updating gks from JSON: {json:#}"); if json.is_null() || json.as_object().is_some_and(|it| it.is_empty()) { return; } match from_json::("GKs", json) { Ok(val) => self.gks = val, - Err(err) => log::warn!("could not update GKs from JSON: {:#}", err), + Err(err) => log::warn!("could not update GKs from JSON: {err:#}"), } } @@ -323,6 +334,9 @@ impl Config { && self.data.lens_run_coverage_enable, debug: self.data.lens_enable && self.data.lens_debug_enable, links: self.data.lens_enable && self.data.lens_links_enable, + logview_links: self.data.lens_enable && self.data.lens_logview_links, + scuba_links: self.data.lens_enable && self.data.lens_scuba_links, + wam_links: self.data.lens_enable && self.data.lens_wam_links, } } @@ -367,14 +381,38 @@ impl Config { try_or!(self.caps.window.as_ref()?.work_done_progress?, false) } + pub fn set_buck_quick_start(&mut self, value: bool) { + self.data.buck_quickStart = value; + } + + pub fn buck_quick_start(&self) -> bool { + self.data.buck_quickStart + } + pub fn buck_query(&self) -> BuckQueryConfig { - BuckQueryConfig::BuildGeneratedCode + if self.buck_quick_start() { + BuckQueryConfig::BuckTargetsOnly + } else { + BuckQueryConfig::BuildGeneratedCode + } } pub fn set_eqwalizer_all(&mut self, value: bool) { self.data.eqwalizer_all = value; } + pub fn set_lens_logview_links(&mut self, value: bool) { + self.data.lens_logview_links = value; + } + + pub fn set_lens_scuba_links(&mut self, value: bool) { + self.data.lens_scuba_links = value; + } + + pub fn set_lens_wam_links(&mut self, value: bool) { + self.data.lens_wam_links = value; + } + pub fn inlay_hints(&self) -> InlayHintsConfig { InlayHintsConfig { parameter_hints: self.data.inlayHints_parameterHints_enable, @@ -401,7 +439,7 @@ macro_rules! 
_config_data { (struct $name:ident { $( $(#[doc=$doc:literal])* - $field:ident $(| $alias:ident)*: $ty:ty = $default:expr_2021, + $field:ident $(| $alias:ident)*: $ty:ty = $default:expr, )* }) => { #[allow(non_snake_case)] @@ -465,7 +503,7 @@ fn schema( fn key(f: &str) -> &str { f.split_once('_').map_or(f, |x| x.0) } - assert!(key(f1) <= key(f2), "wrong field order: {:?} {:?}", f1, f2); + assert!(key(f1) <= key(f2), "wrong field order: {f1:?} {f2:?}"); } let map = fields @@ -490,9 +528,7 @@ fn field_props( let doc = doc.trim_end_matches('\n'); assert!( doc.ends_with('.') && doc.starts_with(char::is_uppercase), - "bad docs for {}: {:?}", - field, - doc + "bad docs for {field}: {doc:?}" ); let mut map = serde_json::Map::default(); @@ -541,7 +577,7 @@ fn field_props( "type": ["null", "array"], "items": { "type": "string" }, }, - _ => panic!("{}: {}", ty, default), + _ => panic!("{ty}: {default}"), } map.into() @@ -551,14 +587,14 @@ fn doc_comment_to_string(doc: &[&str]) -> String { doc.iter() .map(|it| it.strip_prefix(' ').unwrap_or(it)) .fold(String::new(), |mut output, it| { - let _ = writeln!(output, "{}", it); + let _ = writeln!(output, "{it}"); output }) } pub fn config_schema_json() -> String { let s = Config::json_schema(); - let schema = format!("{:#}", s); + let schema = format!("{s:#}"); let mut schema = schema .trim_start_matches('{') .trim_end_matches('}') @@ -582,10 +618,15 @@ mod tests { let s = remove_ws(&schema); - expect![[r#""elp.diagnostics.disabled":{"default":[],"items":{"type":"string"},"markdownDescription":"ListofELPdiagnosticstodisable.","type":"array","uniqueItems":true},"elp.diagnostics.enableExperimental":{"default":false,"markdownDescription":"WhethertoshowexperimentalELPdiagnosticsthatmight\nhavemorefalsepositivesthanusual.","type":"boolean"},"elp.diagnostics.enableOtp":{"default":false,"markdownDescription":"WhethertoreportdiagnosticsforOTPfiles.","type":"boolean"},"elp.diagnostics.onSave.enable":{"default":false,"markdownDescription":"Updatenativediagnosticsonlywhenthefileissaved.","type":"boolean"},"elp.edoc.enable":{"default":false,"markdownDescription":"WhethertoreportEDocdiagnostics.","type":"boolean"},"elp.eqwalizer.all":{"default":false,"markdownDescription":"WhethertoreportEqwalizerdiagnosticsforthewholeprojectandnotonlyforopenedfiles.","type":"boolean"},"elp.eqwalizer.chunkSize":{"default":100,"markdownDescription":"Chunksizetouseforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.eqwalizer.maxTasks":{"default":32,"markdownDescription":"Maximumnumberoftaskstoruninparallelforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.highlightDynamic.enable":{"default":false,"markdownDescription":"Ifenabled,highlightvariableswithtype`dynamic()`whenEqwalizerresultsareavailable.","type":"boolean"},"elp.hoverActions.docLinks.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActionsoftype`docs`.Onlyapplieswhen\n`#elp.hoverActions.enable#`isset.","type":"boolean"},"elp.hoverActions.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActions.","type":"boolean"},"elp.inlayHints.parameterHints.enable":{"default":true,"markdownDescription":"Whethertoshowfunctionparameternameinlayhintsatthecall\nsite.","type":"boolean"},"elp.lens.buck2.mode":{"default":null,"markdownDescription":"Thebuck2modetouseforrunningtestsviathecodelenses.","type":["null","string"]},"elp.lens.debug.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Debug`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.en
able":{"default":false,"markdownDescription":"WhethertoshowCodeLensesinErlangfiles.","type":"boolean"},"elp.lens.links.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Link`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.run.coverage.enable":{"default":true,"markdownDescription":"Displaycodecoverageinformationwhenrunningtestsviathe\nCodeLenses.Onlyapplieswhen`#elp.lens.enabled`and\n`#elp.lens.run.enable#`areset.","type":"boolean"},"elp.lens.run.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Run`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.run.interactive.enable":{"default":false,"markdownDescription":"Whethertoshowthe`RunInteractive`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.log":{"default":"error","markdownDescription":"ConfigureLSP-basedloggingusingenv_loggersyntax.","type":"string"},"elp.signatureHelp.enable":{"default":true,"markdownDescription":"WhethertoshowSignatureHelp.","type":"boolean"},"elp.typesOnHover.enable":{"default":false,"markdownDescription":"Displaytypeswhenhoveringoverexpressions.","type":"boolean"},"#]] + expect![[r#""elp.buck.quickStart":{"default":false,"markdownDescription":"Whethertousetheexpermintal`buck2targets`quickstartprocess.","type":"boolean"},"elp.diagnostics.disabled":{"default":[],"items":{"type":"string"},"markdownDescription":"ListofELPdiagnosticstodisable.","type":"array","uniqueItems":true},"elp.diagnostics.enableExperimental":{"default":false,"markdownDescription":"WhethertoshowexperimentalELPdiagnosticsthatmight\nhavemorefalsepositivesthanusual.","type":"boolean"},"elp.diagnostics.enableOtp":{"default":false,"markdownDescription":"WhethertoreportdiagnosticsforOTPfiles.","type":"boolean"},"elp.diagnostics.onSave.enable":{"default":false,"markdownDescription":"Updatenativediagnosticsonlywhenthefileissaved.","type":"boolean"},"elp.edoc.enable":{"default":false,"markdownDescription":"WhethertoreportEDocdiagnostics.","type":"boolean"},"elp.eqwalizer.all":{"default":false,"markdownDescription":"WhethertoreportEqwalizerdiagnosticsforthewholeprojectandnotonlyforopenedfiles.","type":"boolean"},"elp.eqwalizer.chunkSize":{"default":100,"markdownDescription":"Chunksizetouseforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.eqwalizer.maxTasks":{"default":32,"markdownDescription":"Maximumnumberoftaskstoruninparallelforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.highlightDynamic.enable":{"default":false,"markdownDescription":"Ifenabled,highlightvariableswithtype`dynamic()`whenEqwalizerresultsareavailable.","type":"boolean"},"elp.hoverActions.docLinks.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActionsoftype`docs`.Onlyapplieswhen\n`#elp.hoverActions.enable#`isset.","type":"boolean"},"elp.hoverActions.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActions.","type":"boolean"},"elp.inlayHints.parameterHints.enable":{"default":true,"markdownDescription":"Whethertoshowfunctionparameternameinlayhintsatthecall\nsite.","type":"boolean"},"elp.lens.buck2.mode":{"default":null,"markdownDescription":"Thebuck2modetouseforrunningtestsviathecodelenses.","type":["null","string"]},"elp.lens.debug.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Debug`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.enable":{"default":false,"markdownDescription":"WhethertoshowCodeLensesinErlangfiles.","type":"boolean"},"elp.lens.links.enable":{"default":f
alse,"markdownDescription":"Whethertoshowthe`Link`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.logview.links":{"default":false,"markdownDescription":"WhethertoenableLogViewlenslinks.","type":"boolean"},"elp.lens.run.coverage.enable":{"default":true,"markdownDescription":"Displaycodecoverageinformationwhenrunningtestsviathe\nCodeLenses.Onlyapplieswhen`#elp.lens.enabled`and\n`#elp.lens.run.enable#`areset.","type":"boolean"},"elp.lens.run.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Run`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.run.interactive.enable":{"default":false,"markdownDescription":"Whethertoshowthe`RunInteractive`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.scuba.links":{"default":false,"markdownDescription":"WhethertoenableScubalenslinks.","type":"boolean"},"elp.lens.wam.links":{"default":false,"markdownDescription":"WhethertoenableWAMlenslinks.","type":"boolean"},"elp.log":{"default":"error","markdownDescription":"ConfigureLSP-basedloggingusingenv_loggersyntax.","type":"string"},"elp.signatureHelp.enable":{"default":true,"markdownDescription":"WhethertoshowSignatureHelp.","type":"boolean"},"elp.typesOnHover.enable":{"default":false,"markdownDescription":"Displaytypeswhenhoveringoverexpressions.","type":"boolean"},"#]] .assert_eq(s.as_str()); expect![[r#" + "elp.buck.quickStart": { + "default": false, + "markdownDescription": "Whether to use the expermintal `buck2 targets` quick start process.", + "type": "boolean" + }, "elp.diagnostics.disabled": { "default": [], "items": { @@ -675,6 +716,11 @@ mod tests { "markdownDescription": "Whether to show the `Link` lenses. Only applies when\n`#elp.lens.enable#` is set.", "type": "boolean" }, + "elp.lens.logview.links": { + "default": false, + "markdownDescription": "Whether to enable LogView lens links.", + "type": "boolean" + }, "elp.lens.run.coverage.enable": { "default": true, "markdownDescription": "Display code coverage information when running tests via the\nCode Lenses. Only applies when `#elp.lens.enabled` and\n`#elp.lens.run.enable#` are set.", @@ -690,6 +736,16 @@ mod tests { "markdownDescription": "Whether to show the `Run Interactive` lenses. 
Only applies when\n`#elp.lens.enable#` is set.", "type": "boolean" }, + "elp.lens.scuba.links": { + "default": false, + "markdownDescription": "Whether to enable Scuba lens links.", + "type": "boolean" + }, + "elp.lens.wam.links": { + "default": false, + "markdownDescription": "Whether to enable WAM lens links.", + "type": "boolean" + }, "elp.log": { "default": "error", "markdownDescription": "Configure LSP-based logging using env_logger syntax.", diff --git a/crates/elp/src/convert.rs b/crates/elp/src/convert.rs index a70aeadfb0..c8db07a5d0 100644 --- a/crates/elp/src/convert.rs +++ b/crates/elp/src/convert.rs @@ -26,6 +26,7 @@ use elp_ide::elp_ide_db::assists::AssistContextDiagnostic; use elp_ide::elp_ide_db::assists::AssistContextDiagnosticCode; use elp_ide::elp_ide_db::elp_base_db::AbsPath; use elp_ide::elp_ide_db::elp_base_db::AbsPathBuf; +use elp_ide::elp_ide_db::elp_base_db::FileId; use elp_ide::elp_ide_db::elp_base_db::VfsPath; use lsp_types::DiagnosticRelatedInformation; use lsp_types::Location; @@ -67,11 +68,14 @@ pub fn diagnostic_severity(severity: Severity) -> lsp_types::DiagnosticSeverity } } -pub fn ide_to_lsp_diagnostic( +pub fn ide_to_lsp_diagnostic<F>( line_index: &LineIndex, - url: &Url, d: &Diagnostic, -) -> lsp_types::Diagnostic { + get_file_info: F, +) -> lsp_types::Diagnostic +where + F: Fn(FileId) -> Option<(LineIndex, Url)>, +{ let code_description = match &d.code_doc_uri { Some(uri) => match lsp_types::Url::parse(uri) { Ok(href) => Some(lsp_types::CodeDescription { href }), @@ -90,17 +94,16 @@ pub fn ide_to_lsp_diagnostic( code_description, source, message: d.message.clone(), - related_information: from_related(line_index, url, &d.related_info), - tags: lsp_diagnostic_tags(&d.tag), + related_information: from_related(get_file_info, &d.related_info), + tags: d.tag.as_ref().map(lsp_diagnostic_tags), data: None, } } -fn lsp_diagnostic_tags(d: &DiagnosticTag) -> Option<Vec<lsp_types::DiagnosticTag>> { +fn lsp_diagnostic_tags(d: &DiagnosticTag) -> Vec<lsp_types::DiagnosticTag> { match d { - DiagnosticTag::None => None, - DiagnosticTag::Unused => Some(vec![lsp_types::DiagnosticTag::UNNECESSARY]), - DiagnosticTag::Deprecated => Some(vec![lsp_types::DiagnosticTag::DEPRECATED]), + DiagnosticTag::Unused => vec![lsp_types::DiagnosticTag::UNNECESSARY], + DiagnosticTag::Deprecated => vec![lsp_types::DiagnosticTag::DEPRECATED], } } @@ -123,22 +126,14 @@ pub fn eqwalizer_to_arc_diagnostic( d: &EqwalizerDiagnostic, line_index: &LineIndex, relative_path: &Path, - eqwalizer_enabled: bool, ) -> arc_types::Diagnostic { let pos = position(line_index, d.range.start()); let line_num = pos.line + 1; let character = Some(pos.character + 1); - let severity = if eqwalizer_enabled { - arc_types::Severity::Error - } else { - // We use Severity::Disabled so that we have the ability in our arc linter to choose - // to display lints for *new* files with errors that are not opted in (T118466310). - // See comment at the top of eqwalizer_cli.rs for more information. 
- arc_types::Severity::Disabled - }; + let severity = arc_types::Severity::Error; // formatting: https://fburl.com/max_wiki_link_to_phabricator_rich_text let explanation = match &d.explanation { - Some(s) => format!("```\n{}\n```", s), + Some(s) => format!("```\n{s}\n```"), None => "".to_string(), }; let link = format!("> [docs on `{}`]({})", d.code, d.uri); @@ -163,25 +158,30 @@ pub fn eqwalizer_to_arc_diagnostic( name, message, d.expression.clone(), + None, ) } -fn from_related( - line_index: &LineIndex, - url: &Url, +fn from_related<F>( + get_file_info: F, r: &Option<Vec<RelatedInformation>>, -) -> Option<Vec<DiagnosticRelatedInformation>> { +) -> Option<Vec<DiagnosticRelatedInformation>> +where + F: Fn(elp_ide::elp_ide_db::elp_base_db::FileId) -> Option<(LineIndex, Url)>, +{ r.as_ref().map(|ri| { ri.iter() - .map(|i| { + .filter_map(|i| { + // Get the line index and URL for the file that contains the related information + let (line_index, uri) = get_file_info(i.file_id)?; let location = Location { - range: range(line_index, i.range), - uri: url.clone(), + range: range(&line_index, i.range), + uri, }; - DiagnosticRelatedInformation { + Some(DiagnosticRelatedInformation { location, message: i.message.clone(), - } + }) }) .collect() }) @@ -251,6 +251,7 @@ pub fn ide_to_arc_diagnostic( None => message, }; let severity = diagnostic.severity(use_cli_severity); + let doc_path = diagnostic.code.as_doc_path(); arc_types::Diagnostic::new( path, line_num, @@ -259,5 +260,6 @@ pub fn ide_to_arc_diagnostic( diagnostic.code.as_labeled_code(), description, None, + doc_path, ) } diff --git a/crates/elp/src/handlers.rs b/crates/elp/src/handlers.rs index a429066ce4..d4f4cb4e96 100644 --- a/crates/elp/src/handlers.rs +++ b/crates/elp/src/handlers.rs @@ -17,6 +17,7 @@ use std::time::SystemTime; use anyhow::Result; use anyhow::bail; use elp_ide::Cancellable; +use elp_ide::DocResult; use elp_ide::HighlightedRange; use elp_ide::NavigationTarget; use elp_ide::RangeInfo; @@ -32,6 +33,8 @@ use elp_ide::elp_ide_db::elp_base_db::FilePosition; use elp_ide::elp_ide_db::elp_base_db::FileRange; use elp_ide::elp_ide_db::elp_base_db::ProjectId; use elp_log::telemetry; +use elp_log::timeit_with_telemetry; +use elp_syntax::SmolStr; use itertools::Itertools; use lsp_server::ErrorCode; use lsp_types::CallHierarchyIncomingCall; @@ -64,6 +67,7 @@ use crate::convert::lsp_to_assist_context_diagnostic; use crate::from_proto; use crate::lsp_ext; use crate::snapshot::Snapshot; +use crate::snapshot::TelemetryData; use crate::to_proto; pub(crate) fn handle_code_action( @@ -214,7 +218,7 @@ fn parse_action_id(action_id: &str) -> Result<(usize, SingleResolve), String> { let assist_kind: AssistKind = assist_kind_string.parse()?; let index: usize = match index_string.parse() { Ok(index) => index, - Err(e) => return Err(format!("Incorrect index string: {}", e)), + Err(e) => return Err(format!("Incorrect index string: {e}")), }; Ok(( index, @@ -339,16 +343,22 @@ fn goto_definition_telemetry(snap: &Snapshot, targets: &[NavigationTarget], star .iter() .map(|tgt| snap.file_id_to_url(tgt.file_id)) .collect(); + let target_names: Vec<_> = targets.iter().map(|tgt| tgt.name.clone()).collect(); + let target_kinds: Vec<_> = targets.iter().map(|tgt| tgt.kind).collect(); #[derive(serde::Serialize)] struct Data { targets_include_generated: bool, target_urls: Vec<Url>, + target_names: Vec<SmolStr>, + target_kinds: Vec<SymbolKind>, } let detail = Data { targets_include_generated, target_urls, + target_names, + target_kinds, }; let duration = start.elapsed().map(|e| e.as_millis()).unwrap_or(0) as u32; let data = serde_json::to_value(detail).unwrap_or_else(|err| { @@ -357,6
+367,24 @@ fn goto_definition_telemetry(snap: &Snapshot, targets: &[NavigationTarget], star telemetry::send_with_duration("goto_definition".to_string(), data, duration, start); } +fn send_hover_telemetry(doc_result: &DocResult) { + #[derive(serde::Serialize)] + struct Data { + docs_found: bool, + text: String, + kind: String, + } + let detail = Data { + docs_found: doc_result.doc.is_some(), + text: doc_result.token_text.clone(), + kind: format!("{:?}", doc_result.token_kind), + }; + let data = serde_json::to_value(detail).unwrap_or_else(|err| { + serde_json::Value::String(format!("JSON serialization failed: {err}")) + }); + telemetry::send("hover".to_string(), data); +} + pub(crate) fn handle_goto_type_definition( snap: Snapshot, params: lsp_types::GotoDefinitionParams, @@ -386,10 +414,16 @@ pub(crate) fn handle_references( snap: Snapshot, params: lsp_types::ReferenceParams, ) -> Result<Option<Vec<Location>>> { let _p = tracing::info_span!("handle_references").entered(); + let _timer = timeit_with_telemetry!(TelemetryData::References { + file_url: params.text_document_position.text_document.uri.clone(), + position: params.text_document_position.position + }); + let mut position = from_proto::file_position(&snap, params.text_document_position)?; position.offset = snap .analysis .clamp_offset(position.file_id, position.offset)?; + let refs = match snap.analysis.find_all_refs(position)? { None => return Ok(None), Some(it) => it, @@ -413,6 +447,7 @@ .chain(decl) }) .collect(); + Ok(Some(locations)) } @@ -449,8 +484,10 @@ pub(crate) fn handle_completion_resolve( position.offset = snap .analysis .clamp_offset(position.file_id, position.offset)?; - if let Ok(Some(res)) = snap.analysis.get_docs_at_position(position) { - let docs = res.0.markdown_text().to_string(); + if let Ok(Some(doc_result)) = snap.analysis.get_docs_at_position(position) + && let Some(doc) = doc_result.doc + { + let docs = doc.markdown_text().to_string(); let documentation = lsp_types::Documentation::MarkupContent(lsp_types::MarkupContent { kind: lsp_types::MarkupKind::Markdown, @@ -545,38 +582,43 @@ pub(crate) fn handle_hover(snap: Snapshot, params: HoverParams) -> Result