Compare commits

..

No commits in common. "main" and "2025-07-08" have entirely different histories.

1278 changed files with 49789 additions and 28828 deletions

View file

@ -1,8 +1,8 @@
[alias]
xtask = "run --package xtask --"
# @fb-only: [build]
# @fb-only: target-dir = "../../../buck-out/elp"
# @fb-only
# @fb-only
[profile.release]
codegen-units = 1

View file

@ -1,28 +0,0 @@
---
# Composite action that builds the EDB debugger from source:
# checks out WhatsApp/edb at the otp-28.0 ref, runs `rebar3 escriptize`,
# and uploads the resulting escript as a workflow artifact named `edb`.
name: 'Build EDB'
description: 'Build the EDB debugger from source'
inputs:
  # NOTE(review): none of these inputs are referenced by the steps below —
  # confirm whether callers rely on them or whether they can be removed.
  os:
    required: true
  target:
    required: true
  otp-version:
    required: true
runs:
  using: composite
  steps:
    - name: Checkout EDB
      uses: "actions/checkout@v4"
      with:
        repository: WhatsApp/edb
        path: edb
        # Pin to the OTP-28-compatible ref of EDB.
        ref: otp-28.0
    - name: Build EDB
      run: rebar3 escriptize
      # Composite-action `run` steps must declare a shell explicitly.
      shell: bash
      working-directory: edb
    - name: Upload EDB binary
      uses: "actions/upload-artifact@v4"
      with:
        name: edb
        path: edb/_build/default/bin/edb

View file

@ -1,61 +0,0 @@
---
# Composite action that installs Erlang/OTP and rebar3 across Linux, macOS
# and Windows runners. Linux/Windows use erlef/setup-beam; macOS installs a
# versioned Homebrew keg and adds its bin directory to PATH manually.
name: 'Setup Erlang/OTP'
description: 'Setup Erlang/OTP + rebar3'
inputs:
  os:
    required: true
  otp-version:
    required: true
  # Homebrew kegs are named by major version only (e.g. erlang@27).
  brew-otp-version:
    required: true
runs:
  using: composite
  steps:
    - name: Install Erlang/OTP (Linux, Windows)
      if: inputs.os == 'linux' || inputs.os == 'windows'
      uses: erlef/setup-beam@v1
      with:
        otp-version: ${{ inputs.otp-version }}
        # rebar3 is installed manually below so every platform shares one path.
        install-rebar: false
        install-hex: false
    - name: Install Erlang/OTP (MacOS Only)
      if: inputs.os == 'macos'
      run: brew install erlang@${{ inputs.brew-otp-version }}
      shell: bash
    - name: Add erl to path (MacOS Only)
      if: inputs.os == 'macos'
      # Cover both Homebrew prefixes: /opt/homebrew (arm64) and /usr/local (x64).
      run: |
        echo '/opt/homebrew/opt/erlang@${{ inputs.brew-otp-version }}/bin' >> $GITHUB_PATH
        echo '/usr/local/opt/erlang@${{ inputs.brew-otp-version }}/bin' >> $GITHUB_PATH
      shell: bash
    - name: Verify Erlang version
      run: erl -eval 'erlang:display(erlang:system_info(otp_release)), halt().' -noshell
      shell: bash
    - name: Install rebar3
      run: "mkdir rebar3 && curl https://s3.amazonaws.com/rebar3/rebar3 -o rebar3/rebar3 && chmod +x rebar3/rebar3"
      shell: bash
    - name: Create rebar3.cmd (Windows Only)
      if: inputs.os == 'windows'
      working-directory: rebar3
      # Windows cannot execute the escript directly; generate a .cmd shim
      # that invokes escript.exe on the sibling `rebar3` escript file.
      run: |
        echo '@echo off' > rebar3.cmd
        echo 'setlocal' >> rebar3.cmd
        echo 'set rebarscript=%~f0' >> rebar3.cmd
        echo 'escript.exe "%rebarscript:.cmd=%" %*' >> rebar3.cmd
      shell: pwsh
    - name: Add rebar3 to path (No Windows)
      if: inputs.os != 'windows'
      run: 'echo "$GITHUB_WORKSPACE/rebar3" >> $GITHUB_PATH'
      shell: bash
    - name: Add rebar3 to path (Windows Only)
      if: inputs.os == 'windows'
      run: '"$env:GITHUB_WORKSPACE\rebar3" | Out-File -FilePath "$env:GITHUB_PATH" -Append'
      shell: pwsh
    - name: Verify rebar3 version (No Windows)
      if: inputs.os != 'windows'
      run: rebar3 version
      shell: bash
    - name: Verify rebar3 version (Windows Only)
      if: inputs.os == 'windows'
      run: rebar3.cmd version
      shell: cmd

View file

@ -24,4 +24,4 @@ jobs:
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Build website
run: yarn build-oss
run: yarn build

View file

@ -1,50 +1,29 @@
name: ELP CI
on:
push: {}
pull_request:
branches:
- main
release:
types: [published]
env:
EQWALIZER_DIR: ${{ github.workspace }}/eqwalizer/eqwalizer
ELP_EQWALIZER_PATH: ${{ github.workspace }}/eqwalizer/eqwalizer/eqwalizer
jobs:
edb:
runs-on: ubuntu-latest
steps:
- name: Checkout erlang-language-platform
uses: "actions/checkout@v3"
- id: setup-erlang
uses: ./.github/actions/setup-erlang
with:
os: linux
otp-version: 27.3
brew-otp-version: 27
- id: build-edb
uses: ./.github/actions/build-edb
with:
os: ${{ matrix.os }}
target: ${{ matrix.target }}
otp-version: ${{ matrix.otp-version}}
ci:
strategy:
fail-fast: false
matrix:
platform-arch: [ubuntu-22.04-x64, ubuntu-22.04-arm, macos-15-x64, macos-latest-arm, windows-2022-x64]
platform-arch: [ubuntu-22.04-x64, ubuntu-22.04-arm, macos-13-x64, macos-latest-arm]
otp-version: [26.2, 27.3, 28.0]
include:
- otp-version: 26.2
brew-otp-version: 26
vscode-publish: true
choco-otp-version: 26.2.5.13
- otp-version: 27.3
brew-otp-version: 27
vscode-publish: false
choco-otp-version: 27.3.4
- otp-version: 28.0
brew-otp-version: 28
vscode-publish: false
choco-otp-version: 28.0.1
- platform-arch: ubuntu-22.04-x64
platform: ubuntu-22.04
os: linux
@ -55,8 +34,8 @@ jobs:
os: linux
target: aarch64-unknown-linux-gnu
vscode-target: linux-arm64
- platform-arch: macos-15-x64
platform: macos-15-intel
- platform-arch: macos-13-x64
platform: macos-13
os: macos
target: x86_64-apple-darwin
vscode-target: darwin-x64
@ -65,18 +44,16 @@ jobs:
os: macos
target: aarch64-apple-darwin
vscode-target: darwin-arm64
- platform-arch: windows-2022-x64
platform: windows-2022
os: windows
target: x86_64-pc-windows-msvc
vscode-target: win32-x64
runs-on: ${{ matrix.platform }}
needs: edb
steps:
- name: Checkout erlang-language-platform
uses: "actions/checkout@v3"
- name: Checkout eqwalizer
uses: "actions/checkout@v3"
with:
submodules: true
repository: WhatsApp/eqwalizer
path: eqwalizer
ref: main
- name: Set up GraalVM
uses: graalvm/setup-graalvm@v1
with:
@ -91,95 +68,62 @@ jobs:
uses: dtolnay/rust-toolchain@stable
with:
target: ${{ matrix.target }}
components: rustfmt
- name: Set up cross-compiler
if: matrix.platform-arch == 'ubuntu-22.04-arm'
run: |
sudo apt-get update
sudo apt-get install -y crossbuild-essential-arm64
- name: Install Buck2
uses: dtolnay/install-buck2@latest
- id: setup-erlang
uses: ./.github/actions/setup-erlang
- name: Install Erlang/OTP (Linux Only)
if: matrix.os == 'linux'
uses: erlef/setup-beam@v1
with:
os: ${{ matrix.os }}
otp-version: ${{ matrix.otp-version }}
brew-otp-version: ${{ matrix.brew-otp-version}}
- name: Assemble eqwalizer.jar (No Windows)
if: matrix.os != 'windows'
install-rebar: false
install-hex: false
- name: Install Erlang/OTP (MacOS Only)
if: matrix.os == 'macos'
run: brew install erlang@${{ matrix.brew-otp-version }}
- name: Add erl to path (MacOS Only)
if: matrix.os == 'macos'
run: |
echo '/opt/homebrew/opt/erlang@${{ matrix.brew-otp-version }}/bin' >> $GITHUB_PATH
echo '/usr/local/opt/erlang@${{ matrix.brew-otp-version }}/bin' >> $GITHUB_PATH
- name: Verify Erlang version
run: erl -eval 'erlang:display(erlang:system_info(otp_release)), halt().' -noshell
- name: Install rebar3
run: "mkdir rebar3 && curl https://s3.amazonaws.com/rebar3/rebar3 -o rebar3/rebar3 && chmod +x rebar3/rebar3"
- name: Add rebar3 to path
run: 'echo "$GITHUB_WORKSPACE/rebar3" >> $GITHUB_PATH'
- name: Verify rebar3 version
run: rebar3 version
- name: Assemble eqwalizer.jar
working-directory: eqwalizer/eqwalizer
run: "sbt assembly"
- name: Assemble eqwalizer.jar (Windows Only)
if: matrix.os == 'windows'
working-directory: eqwalizer\eqwalizer
run: "sbt assembly"
shell: bash
- name: Assemble eqwalizer binary (No Windows)
if: matrix.os != 'windows'
- name: Assemble eqwalizer binary
working-directory: eqwalizer/eqwalizer
run: 'native-image -H:IncludeResources=application.conf --no-server --no-fallback -jar target/scala-3.6.4/eqwalizer.jar eqwalizer'
- name: Assemble eqwalizer binary (Windows Only)
if: matrix.os == 'windows'
working-directory: eqwalizer\eqwalizer
run: 'native-image -H:IncludeResources=application.conf --no-server --no-fallback -jar target\scala-3.6.4\eqwalizer.jar eqwalizer'
- name: Ensure elp is formatted
run: 'cargo fmt -- --check'
- name: Configure Environment (No Windows)
if: matrix.os != 'windows'
run: |
echo "EQWALIZER_DIR=${{ github.workspace }}/eqwalizer/eqwalizer" >> $GITHUB_ENV
echo "ELP_EQWALIZER_PATH=${{ github.workspace }}/eqwalizer/eqwalizer/eqwalizer" >> $GITHUB_ENV
- name: Configure Environment (Windows Only)
if: matrix.os == 'windows'
run: |
echo "EQWALIZER_DIR=${{ github.workspace }}\eqwalizer\eqwalizer" >> $env:GITHUB_ENV
echo "ELP_EQWALIZER_PATH=${{ github.workspace }}\eqwalizer\eqwalizer\eqwalizer.exe" >> $env:GITHUB_ENV
- name: Test elp
# Do not run the tests in case of cross-compilation or on Windows
if: matrix.platform-arch != 'macos-latest-arm' && matrix.os != 'windows'
run: 'cargo test --workspace --target ${{ matrix.target }}'
- name: Build elp (No Windows)
if: matrix.os != 'windows'
# Do not run the tests in case of cross-compilation
if: matrix.platform-arch != 'macos-latest-arm'
run: 'cargo test --no-default-features --workspace --target ${{ matrix.target }}'
- name: Build elp
run: 'cargo build --release --target ${{ matrix.target }} --config target.aarch64-unknown-linux-gnu.linker=\"aarch64-linux-gnu-gcc\"'
- name: Build elp (Windows Only)
if: matrix.os == 'windows'
run: 'cargo build --release --target ${{ matrix.target }}'
- name: Add elp to path (No Windows)
if: matrix.os != 'windows'
- name: Add elp to path
run: 'echo "$GITHUB_WORKSPACE/target/${{ matrix.target}}/release" >> $GITHUB_PATH'
- name: Add elp to path (Windows Only)
if: matrix.os == 'windows'
run: '"$env:GITHUB_WORKSPACE\target\${{ matrix.target }}\release" | Out-File -FilePath "$env:GITHUB_PATH" -Append'
- name: Upload elp binary (No Windows)
if: matrix.os != 'windows'
- name: Upload elp binary
uses: "actions/upload-artifact@v4"
with:
name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
path: target/${{ matrix.target }}/release/elp
- name: Upload elp binary (Windows Only)
if: matrix.os == 'windows'
uses: "actions/upload-artifact@v4"
with:
name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
path: target\${{ matrix.target }}\release\elp.exe
- name: Upload eqwalizer native binary (No Windows)
if: matrix.os != 'windows'
path: target/${{ matrix.target}}/release/elp
- name: Upload eqwalizer native binary
uses: "actions/upload-artifact@v4"
with:
name: eqwalizer-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
path: ./eqwalizer/eqwalizer/eqwalizer
- name: Upload eqwalizer native binary (Windows Only)
if: matrix.os == 'windows'
uses: "actions/upload-artifact@v4"
with:
name: eqwalizer-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}
path: .\eqwalizer\eqwalizer\eqwalizer.exe
- name: Make elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz (No Windows)
if: matrix.os != 'windows'
- name: Make elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz
run: 'tar -zcvf elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz -C target/${{ matrix.target}}/release/ elp'
- name: Make elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz (Windows Only)
if: matrix.os == 'windows'
run: 'tar -zcvf elp-${{ matrix.os }}-otp-${{ matrix.otp-version }}.tar.gz -C target\${{ matrix.target}}\release elp.exe'
- env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
id: get_release_url
@ -199,46 +143,15 @@ jobs:
- name: Setup Node
uses: actions/setup-node@v3
with:
node-version: 20
node-version: 18
- name: Install VSCE
run: npm install -g vsce
- name: Install OVSX
run: npm install -g ovsx
- name: Prepare VS Code Extension to host binaries (No Windows)
if: matrix.os != 'windows'
- name: Prepare VS Code Extension to host binaries
run: mkdir -p editors/code/bin
- name: Prepare VS Code Extension to host binaries (Windows Only)
if: matrix.os == 'windows'
run: if not exist "editors\code\bin" mkdir "editors\code\bin"
shell: cmd
- name: Fetch EDB escript
uses: actions/download-artifact@v4
with:
name: edb
- name: Ensure escript is executable
run: chmod +x edb
shell: bash
- name: Verify EDB escript
run: ./edb -h || true
shell: bash
- name: Package EDB escript into VS Code Extension (No Windows)
if: matrix.os != 'windows'
run: cp edb editors/code/bin
- name: Package EDB escript into VS Code Extension (Windows Only)
if: matrix.os == 'windows'
run: cp edb editors\code\bin
- name: Package eqWAlizer binary into VS Code Extension (No Windows)
if: matrix.os != 'windows'
- name: Package eqWAlizer binary into VS Code Extension
run: cp eqwalizer/eqwalizer/eqwalizer editors/code/bin
- name: Package eqWAlizer binary into VS Code Extension (Windows Only)
if: matrix.os == 'windows'
run: cp eqwalizer\eqwalizer\eqwalizer.exe editors\code\bin
- name: Package ELP binary into VS Code Extension (No Windows)
if: matrix.os != 'windows'
- name: Package ELP binary into VS Code Extension
run: cp target/${{ matrix.target}}/release/elp editors/code/bin
- name: Package ELP binary into VS Code Extension (Windows Only)
if: matrix.os == 'windows'
run: cp target\${{ matrix.target}}\release\elp.exe editors\code\bin
- name: Ensure binaries are executable
run: chmod +x editors/code/bin/*
- name: npm install
@ -253,43 +166,22 @@ jobs:
- name: Rename Package
working-directory: editors/code
run: mv erlang-language-platform-*.vsix erlang-language-platform.vsix
- name: Upload Extension (No Windows)
if: matrix.os != 'windows'
- name: Upload Extension
uses: "actions/upload-artifact@v4"
with:
name: elp-${{ matrix.os}}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
path: editors/code/erlang-language-platform.vsix
- name: Upload Extension (Windows Only)
if: matrix.os == 'windows'
uses: "actions/upload-artifact@v4"
with:
name: elp-${{ matrix.os}}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
path: editors\code\erlang-language-platform.vsix
- env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
name: Upload Extension Package (No Windows)
if: ${{ github.event_name == 'release' && matrix.os != 'windows' }}
name: Upload Extension Package
if: ${{ github.event_name == 'release' }}
uses: "actions/upload-release-asset@v1.0.2"
with:
asset_content_type: application/octet-stream
asset_name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
asset_path: editors/code/erlang-language-platform.vsix
upload_url: "${{ steps.get_release_url.outputs.upload_url }}"
- env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
name: Upload Extension Package (Windows Only)
if: ${{ github.event_name == 'release' && matrix.os == 'windows' }}
uses: "actions/upload-release-asset@v1.0.2"
with:
asset_content_type: application/octet-stream
asset_name: elp-${{ matrix.os }}-${{ matrix.target }}-otp-${{ matrix.otp-version }}.vsix
asset_path: editors\code\erlang-language-platform.vsix
upload_url: "${{ steps.get_release_url.outputs.upload_url }}"
- name: Publish extension to marketplace
working-directory: editors/code
if: ${{ github.event_name == 'release' && matrix.vscode-publish && matrix.os != 'windows' }}
if: ${{ github.event_name == 'release' && matrix.vscode-publish }}
run: vsce publish -p ${{ secrets.VSCE_PAT }} --packagePath erlang-language-platform.vsix
- name: Publish extension to OpenVSX marketplace
working-directory: editors/code
if: ${{ github.event_name == 'release' && matrix.vscode-publish && matrix.os != 'windows' }}
run: ovsx publish -p ${{ secrets.OVSX_PAT }} --packagePath erlang-language-platform.vsix

View file

@ -24,7 +24,7 @@ jobs:
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Build website
run: yarn build-oss
run: yarn build
# Popular action to deploy to GitHub Pages:
# Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus

3
.gitmodules vendored
View file

@ -1,3 +0,0 @@
[submodule "eqwalizer"]
path = eqwalizer
url = https://github.com/WhatsApp/eqwalizer

View file

@ -1,50 +1,20 @@
---
llms-gk: 'devmate_elp_development_md'
apply_to_regex: '^(.*\.rs|.*\.md)$'
oncalls: ['vscode_erlang']
---
# ELP Development Rules for LLMs (OSS)
# ELP Development Rules for LLMs
## Project Overview
ELP (Erlang Language Platform) is a language server and development tools suite
for Erlang, built in Rust. This project provides IDE features, diagnostics, and
code analysis for Erlang codebases.
## Build System
Use standard Cargo commands:
```bash
# Build
cargo build --release
# Run tests
cargo test --workspace
# Run clippy
cargo clippy --tests
# Format code
cargo fmt
# Code generation
cargo xtask codegen
```
ELP (Erlang Language Platform) is a language server and development tools suite for Erlang, built in Rust. This project provides IDE features, diagnostics, and code analysis for Erlang codebases.
## Diagnostic Code Management
### Adding New Diagnostic Codes
When adding new diagnostic codes to `DiagnosticCode` enum:
1. **Naming Convention**: Use descriptive PascalCase names that clearly indicate
the issue
1. **Naming Convention**: Use descriptive PascalCase names that clearly indicate the issue
- Good: `UnusedFunctionArg`, `MissingCompileWarnMissingSpec`
- Bad: `Error1`, `BadCode`
2. **Code Assignment**: Follow the established numbering scheme
- `W0000-W9999`: Native ELP diagnostics, visible in the OSS version
- `WA000-WA999`: WhatsApp-specific warnings, only visible in Meta builds
- Use the next available number in the appropriate range
- Never change the number of an existing diagnostic code
- Never change the label of an existing diagnostic code
@ -58,8 +28,7 @@ When adding new diagnostic codes to `DiagnosticCode` enum:
4. **Documentation**: Add comments explaining complex diagnostic codes
5. **Documentation File**: Create a corresponding documentation file in the
website
5. **Documentation File**: Create a corresponding documentation file in the website
- Location: `website/docs/erlang-error-index/{namespace}/{code}.md`
- Example: `W0051` → `website/docs/erlang-error-index/w/W0051.md`
- Include frontmatter with `sidebar_position` matching the code number
@ -72,20 +41,16 @@ When adding new diagnostic codes to `DiagnosticCode` enum:
- The `as_uri()` method automatically generates URLs pointing to these docs
### Creating DiagnosticDescriptor
Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines when and how the diagnostic runs:
Every diagnostic must have a corresponding `DiagnosticDescriptor` that defines
when and how the diagnostic runs:
1. **Static Descriptor Declaration**: Create a public static descriptor in your
diagnostic module
1. **Static Descriptor Declaration**: Create a public static descriptor in your diagnostic module
- Use `pub(crate) static DESCRIPTOR: DiagnosticDescriptor` pattern
- Define `DiagnosticConditions` with appropriate flags
- Provide a checker function that implements the diagnostic logic
2. **Diagnostic Conditions**: Configure when the diagnostic should run
- `experimental`: Mark as true for experimental/unstable diagnostics
- `include_generated`: Set to false if diagnostic shouldn't run on generated
code
- `include_generated`: Set to false if diagnostic shouldn't run on generated code
- `include_tests`: Set to false if diagnostic shouldn't run on test files
- `default_disabled`: Set to true if diagnostic requires explicit enabling
@ -94,8 +59,7 @@ when and how the diagnostic runs:
- Push diagnostics to the `diags` vector using `Diagnostic::new()`
- Use helper functions to keep the checker clean and focused
4. **Registration**: Add the descriptor to `diagnostics_descriptors()` function
in `diagnostics.rs`
4. **Registration**: Add the descriptor to `diagnostics_descriptors()` function in `diagnostics.rs`
- Include your module's `DESCRIPTOR` in the returned vector
5. **Module Structure**: Follow the established pattern
@ -104,28 +68,29 @@ when and how the diagnostic runs:
- Include comprehensive tests with `#[cfg(test)]`
- Use SSR patterns when appropriate for complex matching
### Meta-Only vs OSS Code
- Use `@fb-only` and `@oss-only` comments to mark platform-specific code
- Meta-only diagnostics should use `MetaOnlyDiagnosticCode` wrapper
- Ensure OSS builds work by providing fallbacks for Meta-only features
## Rust Code Style
### Error Handling
- Use `Result<T, E>` for fallible operations
- Prefer `?` operator over explicit match for error propagation
- Use descriptive error messages with context
### Pattern Matching
- Use exhaustive matches for enums to catch new variants at compile time
- Add explicit comments when intentionally using catch-all patterns
- Prefer early returns to reduce nesting
### String Handling
- Use `&str` for borrowed strings, `String` for owned
- Use `format!()` for complex string formatting
- Use `to_string()` for simple conversions
### Collections
- Use `FxHashMap` instead of `std::HashMap` for better performance
- Use `lazy_static!` for expensive static computations
- Prefer iterators over manual loops where possible
@ -133,116 +98,28 @@ when and how the diagnostic runs:
## Testing Guidelines
### Test Structure
- Use `expect_test` for snapshot testing of complex outputs
- Group related tests in the same module
- Use descriptive test names that explain the scenario
### Declarative Test Fixtures
ELP uses a declarative test fixture system that allows you to write tests with
inline annotations and markers directly in test strings. This system is defined
in `crates/project_model/src/test_fixture.rs`.
#### Key Features
1. **File Organization**: Use `//- /path/to/file.erl` to define multiple files
in a single test
2. **Metadata Markers**: Specify app names, include paths, OTP apps, etc. using
metadata after the path
3. **Annotations**: Mark expected diagnostics or ranges using `%% ^^^` syntax
4. **Cursors and Ranges**: Use `~` markers to indicate positions or ranges in
test code
#### Annotation Syntax
Annotations allow you to mark expected diagnostics, types, or other information
directly in test code:
- **Basic annotation**: `%% ^^^ some text` - Points to the range above matching
the caret length
- **Top-of-file marker**: `%% <<< text` (at file start) - Creates annotation at
position 0..0
- **File-wide annotation**: `%% ^^^file text` - Annotation spans the entire file
contents
- **Left-margin annotation**: `%%<^^^ text` - Annotation starts at `%%` position
instead of first `^`
- **Multiline annotations**: Use continuation lines with `%% | next line`
- Continuation lines are particularly useful for diagnostics with related information:
```erlang
foo() -> syntax error oops.
%% ^^^^^ error: P1711: syntax error before: error
%% | Related info: 0:45-50 function foo/0 undefined
```
#### Example Test Fixture
```rust
let fixture = r#"
//- /src/main.erl
-module(main).
foo( -> ok. %%
%% ^ error: W0004: Missing ')'~
"#;
```
### Test Data
- Create minimal test cases that focus on specific functionality
- Use realistic Erlang code examples in tests
- Test both positive and negative cases
### Running Tests for Specific Crates
When running tests for a specific crate, you need to specify the crate name, not
the directory name. The mapping is:
| Crate Name | Directory Name |
| -------------------- | ----------------------- |
| `elp` | `crates/elp` |
| `elp_base_db` | `crates/base_db` |
| `elp_eqwalizer` | `crates/eqwalizer` |
| `elp_erlang_service` | `crates/erlang_service` |
| `elp_ide` | `crates/ide` |
| `elp_ide_assists` | `crates/ide_assists` |
| `elp_ide_completion` | `crates/ide_completion` |
| `elp_ide_db` | `crates/ide_db` |
| `elp_ide_ssr` | `crates/ide_ssr` |
| `elp_log` | `crates/elp_log` |
| `elp_project_model` | `crates/project_model` |
| `elp_syntax` | `crates/syntax` |
| `elp_text_edit` | `crates/text_edit` |
| `elp_types_db` | `crates/types_db` |
| `hir` | `crates/hir` |
Example: To run tests for the `elp_ide` crate:
```bash
cargo test -p elp_ide
```
Or to run tests in a specific directory:
```bash
cargo test --manifest-path crates/ide/Cargo.toml
```
### Existing tests
- Do not change existing tests without asking
## Documentation
### Code Comments
- Document complex algorithms and business logic
- Explain WHY, not just WHAT the code does
- Use `///` for public API documentation
- Use `//` for internal implementation notes
### Error Messages
- Make error messages actionable and user-friendly
- Include context about what was expected vs. what was found
- Provide suggestions for fixing the issue when possible
@ -250,13 +127,11 @@ cargo test --manifest-path crates/ide/Cargo.toml
## Performance Considerations
### Memory Usage
- Use `Box<T>` for large enum variants to keep enum size small
- Consider using `Cow<str>` for strings that might be borrowed or owned
- Use `Arc<T>` for shared immutable data
### Computation
- Cache expensive computations using `lazy_static!` or `once_cell`
- Use appropriate data structures (HashMap for lookups, Vec for sequences)
- Profile code paths that handle large Erlang codebases
@ -264,13 +139,11 @@ cargo test --manifest-path crates/ide/Cargo.toml
## Integration Guidelines
### Erlang Service Integration
- Handle Erlang service errors gracefully
- Use appropriate namespaces for different error sources
- Maintain backward compatibility with existing error codes
### IDE Integration
- Provide rich diagnostic information (ranges, severity, fixes)
- Support quick fixes and code actions where appropriate
- Ensure diagnostics are fast enough for real-time feedback
@ -278,19 +151,16 @@ cargo test --manifest-path crates/ide/Cargo.toml
## Maintenance
### Backward Compatibility
- Don't change existing diagnostic codes or their meanings
- Deprecate old codes before removing them
- Maintain serialization compatibility for configuration files
### Code Organization
- Keep related functionality together in modules
- Use clear module boundaries and public APIs
- Minimize dependencies between modules
### Version Management
- Follow semantic versioning for public APIs
- Document breaking changes in release notes
- Provide migration guides for major changes
@ -298,25 +168,27 @@ cargo test --manifest-path crates/ide/Cargo.toml
## Common Patterns
### Regex Usage
- Use `lazy_static!` for compiled regexes
- Prefer specific patterns over overly broad ones
- Test regex patterns thoroughly with edge cases
### Configuration
- Support both code-based and label-based diagnostic references
- Use serde for serialization/deserialization
- Provide sensible defaults for all configuration options
### Error Recovery
- Continue processing after encountering errors when possible
- Collect multiple errors rather than failing on the first one
- Provide partial results when full analysis isn't possible
### Process
### Tools
- Always run tests before finishing
- Always run `cargo clippy --tests` before submitting PRs
- Use `cargo fmt` for code formatting
- ELP uses a cargo workspace.
- Inside Meta, use `./meta/cargo.sh` instead of `cargo`
- Inside Meta, use `./meta/clippy.sh` to run clippy
- Use `arc lint --apply-patches` for formatting.
### Process
- Always run tests before finishing.
- Always run `./meta/cargo.sh clippy --tests` before submitting a diff

95
.vscode/tasks.json vendored
View file

@ -1,95 +0,0 @@
{
  // VS Code task definitions for ELP development. Each task wraps a cargo
  // command; the @fb-only/@oss-only markers let Meta-internal sync tooling
  // swap in its wrapper scripts (./meta/cargo.sh, ./meta/clippy.sh).
  //
  // Fixed: the VS Code task schema expects `isDefault` inside `group`,
  // not `is_default` (the unknown key was silently ignored by VS Code).
  "version": "2.0.0",
  "tasks": [
    {
      "label": "ELP: build (debug)",
      "type": "shell",
      // @fb-only: "command": "./meta/cargo.sh build",
      "command": "cargo build", // @oss-only
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    },
    {
      "label": "ELP: build (release)",
      "type": "shell",
      // @fb-only: "command": "./meta/cargo.sh build --release",
      "command": "cargo build --release", // @oss-only
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    },
    {
      "label": "ELP: build (release-thin)",
      "type": "shell",
      // @fb-only: "command": "./meta/cargo.sh build --profile release-thin --bins",
      "command": "cargo build --profile release-thin --bins", // @oss-only
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    },
    {
      "label": "ELP: run clippy on workspace",
      "type": "shell",
      // @fb-only: "command": "./meta/clippy.sh --workspace --tests",
      "command": "cargo clippy --workspace --tests", // @oss-only
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    },
    {
      "label": "ELP: run clippy on workspace, apply fixes",
      "type": "shell",
      // @fb-only: "command": "./meta/clippy.sh --workspace --tests --fix",
      "command": "cargo clippy --workspace --tests --fix", // @oss-only
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    },
    {
      "label": "ELP: run tests on workspace",
      "type": "shell",
      // @fb-only: "command": "./meta/cargo.sh test --workspace",
      "command": "cargo test --workspace", // @oss-only
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    }
  ]
}

View file

@ -20,7 +20,7 @@ We actively welcome your pull requests.
1. Fork the repo and create your branch from `main`.
2. If you've added code that should be tested, add tests.
3. Ensure the test suite passes and that the code is formatted correctly (`cargo fmt -- --check`)
3. Ensure the test suite passes.
4. If you haven't already, complete the Contributor License Agreement ("CLA").
## Contributor License Agreement ("CLA")

23
Cargo.lock generated
View file

@ -446,10 +446,10 @@ dependencies = [
"crossbeam-channel",
"elp_eqwalizer",
"elp_ide",
"elp_ide_db",
"elp_log",
"elp_project_model",
"elp_syntax",
"elp_text_edit",
"env_logger",
"expect-test",
"fs_extra",
@ -572,6 +572,7 @@ dependencies = [
"elp_ide_ssr",
"elp_project_model",
"elp_syntax",
"elp_text_edit",
"elp_types_db",
"env_logger",
"expect-test",
@ -603,6 +604,7 @@ dependencies = [
"cov-mark",
"elp_ide_db",
"elp_syntax",
"elp_text_edit",
"expect-test",
"fxhash",
"hir",
@ -635,7 +637,6 @@ name = "elp_ide_db"
version = "1.1.0"
dependencies = [
"anyhow",
"cov-mark",
"eetf",
"either",
"elp_base_db",
@ -643,12 +644,12 @@ dependencies = [
"elp_erlang_service",
"elp_project_model",
"elp_syntax",
"elp_text_edit",
"elp_types_db",
"expect-test",
"fxhash",
"hir",
"indexmap 2.9.0",
"itertools 0.10.5",
"lazy_static",
"log",
"memchr",
@ -663,7 +664,6 @@ dependencies = [
"strum",
"strum_macros",
"tempfile",
"text-size",
"toml",
"tracing",
]
@ -734,8 +734,10 @@ dependencies = [
name = "elp_syntax"
version = "1.1.0"
dependencies = [
"cov-mark",
"eetf",
"elp_ide_db",
"elp_text_edit",
"expect-test",
"fxhash",
"indexmap 2.9.0",
@ -755,6 +757,14 @@ dependencies = [
"tree-sitter-erlang",
]
[[package]]
name = "elp_text_edit"
version = "1.1.0"
dependencies = [
"itertools 0.10.5",
"text-size",
]
[[package]]
name = "elp_types_db"
version = "1.1.0"
@ -2524,11 +2534,10 @@ dependencies = [
[[package]]
name = "tree-sitter-erlang"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2091cce4eda19c03d77928c608ac6617445a6a25691dde1e93ac0102467a6be"
version = "0.14.0"
dependencies = [
"cc",
"tree-sitter",
"tree-sitter-language",
]

View file

@ -30,9 +30,13 @@ elp_ide_ssr = { path = "./crates/ide_ssr" }
elp_log = { path = "./crates/elp_log" }
elp_project_model = { path = "./crates/project_model" }
elp_syntax = { path = "./crates/syntax" }
elp_text_edit = { path = "./crates/text_edit" }
elp_types_db = { path = "./crates/types_db" }
hir = { path = "./crates/hir" }
# Forks
erl_ast = { path = "./crates/erl_ast" }
# External crates
trie-rs = "0.4.2"
always-assert = "0.1.3"
@ -108,9 +112,8 @@ threadpool = "1.8.1"
timeout-readwrite = "0.3.3"
toml = "0.5"
tree-sitter = "0.23.2"
# When developing the grammar, you may want to point to a local version
# tree-sitter-erlang = { path = "./tree-sitter-erlang" }
tree-sitter-erlang = "0.15.0"
# @fb-only
tree-sitter-erlang = "0.14.0" # @oss-only
url = "2.5.4"
ustr = { version = "1.1.0", features = ["serde"] }
vfs = { git = "https://github.com/rust-lang/rust-analyzer", rev = "2025-03-04" }

View file

@ -0,0 +1,60 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is dual-licensed under either the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree or the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree. You may select, at your option, one of the
* above-listed licenses.
*/
use std::thread;
use std::time;
use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::criterion_group;
use criterion::criterion_main;
/// Naive exponential-time Fibonacci, used as the "slow" benchmark target.
/// This variant of the sequence starts at 1: fib(0) == 1 and fib(1) == 1.
fn fibonacci_slow(n: u64) -> u64 {
    if n < 2 {
        1
    } else {
        fibonacci_slow(n - 1) + fibonacci_slow(n - 2)
    }
}
/// Iterative Fibonacci used as the "fast" benchmark target; same sequence
/// as `fibonacci_slow` (fib(0) == 1, fib(1) == 1).
///
/// NOTE(review): the 12 ms sleep appears deliberate — presumably an
/// artificial fixed cost for the benchmark demo. Confirm before removing.
fn fibonacci_fast(n: u64) -> u64 {
    thread::sleep(time::Duration::from_millis(12));
    let (mut prev, mut curr) = (0u64, 1u64);
    // After k iterations curr == fib(k); zero iterations leaves fib(0) == 1.
    for _ in 0..n {
        let next = prev + curr;
        prev = curr;
        curr = next;
    }
    curr
}
/// Register the Fibonacci comparison benchmarks with Criterion.
/// Runs both implementations at the same inputs so their timings land
/// side by side in a single "Fibonacci" report group.
fn bench_fibs(c: &mut Criterion) {
    let mut group = c.benchmark_group("Fibonacci");
    for input in [20u64, 21u64].iter() {
        group.bench_with_input(BenchmarkId::new("Recursive", input), input, |b, &n| {
            b.iter(|| fibonacci_slow(n))
        });
        group.bench_with_input(BenchmarkId::new("Iterative", input), input, |b, &n| {
            b.iter(|| fibonacci_fast(n))
        });
    }
    group.finish();
}
criterion_group!(benches, bench_fibs);
criterion_main!(benches);

View file

@ -0,0 +1,16 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is dual-licensed under either the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree or the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree. You may select, at your option, one of the
* above-listed licenses.
*/
use std::env;
fn main() {
let args: Vec<String> = env::args().collect();
println!("ARGS: {:?}", args);
}

View file

@ -87,7 +87,6 @@ pub trait WithFixture: Default + SourceDatabaseExt + 'static {
let (fixture, change) = ChangeFixture::parse(fixture_str);
let mut db = Self::default();
change.apply(&mut db, &|path| fixture.resolve_file_id(path));
fixture.validate(&db);
(db, fixture)
}
}
@ -102,7 +101,6 @@ pub struct ChangeFixture {
pub diagnostics_enabled: DiagnosticsEnabled,
pub tags: FxHashMap<FileId, Vec<(TextRange, Option<String>)>>,
pub annotations: FxHashMap<FileId, Vec<(TextRange, String)>>,
pub expect_parse_errors: bool,
}
struct Builder {
@ -144,7 +142,7 @@ impl Builder {
fn absolute_path(&self, path: String) -> String {
if let Some(project_dir) = &self.project_dir {
let project_dir_str = project_dir.path().as_os_str().to_str().unwrap();
format!("{project_dir_str}/{path}")
format!("{}/{}", project_dir_str, path)
} else {
path
}
@ -174,7 +172,6 @@ impl ChangeFixture {
let FixtureWithProjectMeta {
fixture,
mut diagnostics_enabled,
expect_parse_errors,
} = fixture_with_meta.clone();
let builder = Builder::new(diagnostics_enabled.clone());
@ -202,10 +199,10 @@ impl ChangeFixture {
let app_name = entry.app_data.name.clone();
if let Some(otp_extra) = entry.otp
&& otp.is_none()
{
otp = Some(otp_extra);
if let Some(otp_extra) = entry.otp {
if otp.is_none() {
otp = Some(otp_extra);
}
}
app_map.combine(entry.app_data);
@ -275,7 +272,7 @@ impl ChangeFixture {
write!(tmp_file, "{}", &text).unwrap();
}
let json_config_file = format!("{project_dir_str}/build_info.json");
let json_config_file = format!("{}/build_info.json", project_dir_str);
let mut writer = File::create(&json_config_file).unwrap();
@ -298,7 +295,7 @@ impl ChangeFixture {
ProjectManifest::discover(&AbsPathBuf::assert(json_config_file.into())).unwrap();
let loaded_project = Project::load(
&manifest,
&elp_config,
elp_config.eqwalizer,
&BuckQueryConfig::BuildGeneratedCode,
&|_| {},
)
@ -347,7 +344,6 @@ impl ChangeFixture {
diagnostics_enabled,
tags,
annotations,
expect_parse_errors,
},
change,
project,
@ -409,64 +405,6 @@ impl ChangeFixture {
.get(&VfsPath::from(path.clone()))
.cloned()
}
/// Validate all files in the fixture for syntax errors.
/// Panics with context if any syntax errors are found.
/// Skips validation if `expect_parse_errors` is set to true.
#[track_caller]
pub fn validate<DB: SourceDatabaseExt>(&self, db: &DB) {
if self.expect_parse_errors {
return;
}
let mut errors_found = Vec::new();
for file_id in &self.files {
let parse = db.parse(*file_id);
let errors = parse.errors();
if !errors.is_empty() {
let path = self
.files_by_path
.iter()
.find_map(|(vfs_path, id)| {
if id == file_id {
Some(
vfs_path
.as_path()
.map(|p| p.to_string())
.unwrap_or_else(|| format!("{:?}", vfs_path)),
)
} else {
None
}
})
.unwrap_or_else(|| format!("FileId({:?})", file_id));
let file_text = SourceDatabaseExt::file_text(db, *file_id);
let tree = parse.tree();
errors_found.push((path, file_text.to_string(), errors.to_vec(), tree));
}
}
if !errors_found.is_empty() {
let mut message =
String::from("Fixture validation failed: syntax errors found in test fixture\n\n");
for (path, text, errors, tree) in errors_found {
message.push_str(&format!("File: {}\n", path));
message.push_str(&format!("Errors: {:?}\n", errors));
message.push_str(&format!("Content:\n{}\n", text));
message.push_str(&format!("Parse Tree:\n{:#?}\n", tree));
message.push_str("---\n");
}
message.push_str(
"If this is expected, add `//- expect_parse_errors` to the start of the fixture\n",
);
panic!("{}", message);
}
}
}
fn inc_file_id(file_id: &mut FileId) {
@ -546,8 +484,8 @@ bar() -> ?FOO.
app_map: {
SourceRootId(
0,
): AppMapData {
app_data: Some(
): (
Some(
AppData {
project_id: ProjectId(
0,
@ -555,7 +493,6 @@ bar() -> ?FOO.
name: AppName(
"test-fixture",
),
buck_target_name: None,
dir: AbsPathBuf(
"/",
),
@ -581,13 +518,12 @@ bar() -> ?FOO.
is_test_target: None,
},
),
applicable_files: None,
gen_src_files: None,
},
None,
),
SourceRootId(
2,
): AppMapData {
app_data: Some(
): (
Some(
AppData {
project_id: ProjectId(
1,
@ -595,7 +531,6 @@ bar() -> ?FOO.
name: AppName(
"comp",
),
buck_target_name: None,
dir: AbsPathBuf(
"/opt/lib/comp-1.3",
),
@ -632,13 +567,12 @@ bar() -> ?FOO.
is_test_target: None,
},
),
applicable_files: None,
gen_src_files: None,
},
None,
),
SourceRootId(
1,
): AppMapData {
app_data: Some(
): (
Some(
AppData {
project_id: ProjectId(
0,
@ -646,7 +580,6 @@ bar() -> ?FOO.
name: AppName(
"foo-app",
),
buck_target_name: None,
dir: AbsPathBuf(
"/",
),
@ -672,16 +605,14 @@ bar() -> ?FOO.
is_test_target: None,
},
),
applicable_files: None,
gen_src_files: None,
},
None,
),
SourceRootId(
3,
): AppMapData {
app_data: None,
applicable_files: None,
gen_src_files: None,
},
): (
None,
None,
),
},
project_map: {
ProjectId(
@ -733,10 +664,13 @@ bar() -> ?FOO.
eqwalizer_config: EqwalizerConfig {
enable_all: true,
max_tasks: 4,
ignore_modules: [],
ignore_modules_compiled_patterns: [],
},
include_mapping: None,
include_mapping: Some(
IncludeMapping {
includes: {},
deps: {},
},
),
},
ProjectId(
1,
@ -768,10 +702,13 @@ bar() -> ?FOO.
eqwalizer_config: EqwalizerConfig {
enable_all: true,
max_tasks: 4,
ignore_modules: [],
ignore_modules_compiled_patterns: [],
},
include_mapping: None,
include_mapping: Some(
IncludeMapping {
includes: {},
deps: {},
},
),
},
},
catch_all_source_root: SourceRootId(
@ -807,8 +744,8 @@ foo() -> ?BAR.
app_map: {
SourceRootId(
0,
): AppMapData {
app_data: Some(
): (
Some(
AppData {
project_id: ProjectId(
0,
@ -816,7 +753,6 @@ foo() -> ?BAR.
name: AppName(
"test-fixture",
),
buck_target_name: None,
dir: AbsPathBuf(
"/extra",
),
@ -858,16 +794,14 @@ foo() -> ?BAR.
is_test_target: None,
},
),
applicable_files: None,
gen_src_files: None,
},
None,
),
SourceRootId(
1,
): AppMapData {
app_data: None,
applicable_files: None,
gen_src_files: None,
},
): (
None,
None,
),
},
project_map: {
ProjectId(
@ -900,10 +834,13 @@ foo() -> ?BAR.
eqwalizer_config: EqwalizerConfig {
enable_all: true,
max_tasks: 4,
ignore_modules: [],
ignore_modules_compiled_patterns: [],
},
include_mapping: None,
include_mapping: Some(
IncludeMapping {
includes: {},
deps: {},
},
),
},
ProjectId(
1,
@ -925,10 +862,13 @@ foo() -> ?BAR.
eqwalizer_config: EqwalizerConfig {
enable_all: true,
max_tasks: 4,
ignore_modules: [],
ignore_modules_compiled_patterns: [],
},
include_mapping: None,
include_mapping: Some(
IncludeMapping {
includes: {},
deps: {},
},
),
},
},
catch_all_source_root: SourceRootId(

View file

@ -10,8 +10,6 @@
use std::sync::Arc;
use elp_project_model::AppName;
use elp_project_model::buck::IncludeMappingScope;
use elp_syntax::SmolStr;
use vfs::FileId;
use vfs::VfsPath;
@ -24,53 +22,30 @@ use crate::SourceRoot;
pub struct IncludeCtx<'a> {
db: &'a dyn SourceDatabase,
source_root: Arc<SourceRoot>,
/// The starting .erl file when resolving includes
pub orig_file_id: Option<FileId>,
/// The current `FileId`. This starts out the same as
/// `orig_file_id`, but will change if a nested include file is
/// processed. The dependency graph for includes is calculated
/// based on the `orig_file_id`, if set.
pub current_file_id: FileId,
pub file_id: FileId,
}
impl<'a> IncludeCtx<'a> {
pub fn new(
db: &'a dyn SourceDatabase,
orig_file_id: Option<FileId>,
current_file_id: FileId,
) -> Self {
pub fn new(db: &'a dyn SourceDatabase, file_id: FileId) -> Self {
// Context for T171541590
let _ = stdx::panic_context::enter(format!(
"\nIncludeCtx::new: {orig_file_id:?} {current_file_id:?}"
));
let source_root_id = db.file_source_root(current_file_id);
let _ = stdx::panic_context::enter(format!("\nIncludeCtx::new: {:?}", file_id));
let source_root_id = db.file_source_root(file_id);
let source_root = db.source_root(source_root_id);
Self {
db,
orig_file_id,
current_file_id,
file_id,
source_root,
}
}
pub fn resolve_include(&self, path: &str) -> Option<FileId> {
// Note, from https://www.erlang.org/doc/apps/erts/erlc_cmd#generally-useful-flags
// When encountering an `-include` or `-include_lib` directive,
// the compiler searches for header files in the following directories:
//
// - ".", the current working directory of the file server
// - The base name of the compiled file
// - The directories specified using option -I; the directory
// specified last is searched first
self.resolve_relative(path)
.or_else(|| self.db.resolve_local(self.current_file_id, path.into()))
.or_else(|| self.db.resolve_local(self.file_id, path.into()))
}
pub fn resolve_include_lib(&self, path: &str) -> Option<FileId> {
self.resolve_include(path).or_else(|| {
self.db
.resolve_remote(self.orig_file_id, self.current_file_id, path.into())
})
self.resolve_include(path)
.or_else(|| self.db.resolve_remote(self.file_id, path.into()))
}
pub fn resolve_include_doc(&self, path: &str) -> Option<FileId> {
@ -78,126 +53,53 @@ impl<'a> IncludeCtx<'a> {
}
fn resolve_relative(&self, path: &str) -> Option<FileId> {
self.source_root.relative_path(self.current_file_id, path)
self.source_root.relative_path(self.file_id, path)
}
/// Called via salsa for inserting in the graph. We are looking
/// for a base filename in the includes of the current app (from
/// the `file_id`) or any of its dependencies
/// Called via salsa for inserting in the graph
pub(crate) fn resolve_local_query(
db: &dyn SourceDatabase,
file_id: FileId,
path: SmolStr,
) -> Option<FileId> {
let project_id = db.file_project_id(file_id)?;
let app_data = db.file_app_data(file_id)?;
if let Some(file_id) = db.mapped_include_file(
project_id,
IncludeMappingScope::Local(app_data.name.clone()),
path.clone(),
) {
if let Some(file_id) = db.mapped_include_file(project_id, path.clone()) {
Some(file_id)
} else {
// Not in the current app, look in the dependencies
let include_file_index = db.include_file_index(project_id);
if let Some(file_path) = include_file_index
.include_mapping
.find_local(&app_data.name, &path)
{
include_file_index
.path_to_file_id
.get(&VfsPath::from(file_path.clone()))
.copied()
} else {
// Fallback for non-buck2 projects
let path: &str = &path;
app_data.include_path.iter().find_map(|include| {
let name = include.join(path);
db.include_file_id(app_data.project_id, VfsPath::from(name.clone()))
})
}
let path: &str = &path;
let app_data = db.file_app_data(file_id)?;
app_data.include_path.iter().find_map(|include| {
let name = include.join(path);
db.include_file_id(app_data.project_id, VfsPath::from(name.clone()))
})
}
}
/// Called via salsa for inserting in the graph
/// When processing a .erl file, it can include other files, and so on recursively.
/// In this case, the starting file is the `orig_file_id`, and the current file is
/// the one being processed.
pub(crate) fn resolve_remote_query(
db: &dyn SourceDatabase,
orig_file_id: Option<FileId>,
current_file_id: FileId,
file_id: FileId,
path: SmolStr,
) -> Option<FileId> {
let project_id = db.file_project_id(current_file_id)?;
let project_id = db.file_project_id(file_id)?;
let project_data = db.project_data(project_id);
// `app_data` represents the app that is doing the including.
// If `orig_file_id` is set, we are possibly processing a
// nested include file. In this case we must do our checking
// based on its app data.
let app_data = orig_file_id
.map(|file_id| db.file_app_data(file_id))
.unwrap_or_else(|| db.file_app_data(current_file_id))?;
let (app_name, include_path) = path.split_once('/')?;
let source_root_id = project_data.app_roots.get(app_name)?;
let target_app_data = db.app_data(source_root_id)?;
if let Some(include_mapping) = &project_data.include_mapping {
if let Some(p) = include_mapping
.get(IncludeMappingScope::Remote, &path)
let include = if let Some(include_mapping) = &project_data.include_mapping {
include_mapping
.get(&path)
.map(|path| db.include_file_id(project_id, VfsPath::from(path.clone())))
{
if p.is_some() {
// We have an entry in the include mapping, and it maps to a FileId
if let Some(target_full_name) = &app_data.buck_target_name {
// We have an entry for the lookup, only return it
// if it is in the dependencies
if include_mapping.is_dep(target_full_name, &AppName(app_name.to_string()))
{
p
} else {
// We have a lookup value, but it is not a
// dependency, do not do fallback processing
None
}
} else {
// This should not be possible. We only have
// an include mapping for a buck project, and
// so the `buck_target_name` should be
// populated.
log::warn!(
"include mapping without buck_target_name: app:{:?}, path:{}",
&app_data.name,
&path
);
None
}
} else {
// We do have an entry in the include mapping, but
// it does not resolve to a valid FileId.
// This should also not happen.
log::warn!(
"include mapping does not resolve to FileId: app:{:?}, path:{}, p:{:?}",
&app_data.name,
&path,
&p
);
None
}
} else {
// We did not find an entry in the include mapping.
None
}
} else {
// There is no include mapping.
// This is the path followed when it is not a buck2
// project, as those are currently the only ones that
// populate the include_mapping.
None
};
include.unwrap_or_else(|| {
let (app_name, include_path) = path.split_once('/')?;
let source_root_id = project_data.app_roots.get(app_name)?;
let target_app_data = db.app_data(source_root_id)?;
let path = target_app_data.dir.join(include_path);
db.include_file_id(project_id, VfsPath::from(path.clone()))
.or_else(|| {
find_generated_include_lib(db, project_id, include_path, &target_app_data)
})
}
})
}
}
@ -253,8 +155,7 @@ pub fn generated_file_include_lib(
.iter()
.find_map(|dir| include_path.as_path()?.strip_prefix(dir))?;
let candidate = format!("{}/include/{}", inc_app_data.name, candidate_path.as_str());
let resolved_file_id =
IncludeCtx::new(db, Some(file_id), file_id).resolve_include_lib(&candidate)?;
let resolved_file_id = IncludeCtx::new(db, file_id).resolve_include_lib(&candidate)?;
if resolved_file_id == included_file_id {
// We have an equivalent include
Some(candidate)

View file

@ -15,13 +15,12 @@ use std::sync::Arc;
use elp_project_model::AppName;
use elp_project_model::AppType;
use elp_project_model::ApplicableFiles;
use elp_project_model::EqwalizerConfig;
use elp_project_model::Project;
use elp_project_model::ProjectAppData;
use elp_project_model::buck::IncludeMapping;
use elp_project_model::buck::TargetFullName;
use fxhash::FxHashMap;
use fxhash::FxHashSet;
use paths::RelPath;
use paths::Utf8Path;
use vfs::AbsPathBuf;
@ -125,8 +124,6 @@ pub struct ProjectData {
pub struct AppData {
pub project_id: ProjectId,
pub name: AppName,
/// Target name if this application originates from a buck target
pub buck_target_name: Option<TargetFullName>,
pub dir: AbsPathBuf,
/// Include directories belonging to this app only. Used for
/// include_lib resolution
@ -179,23 +176,11 @@ impl AppData {
/// Note that `AppStructure` is build-system agnostic
#[derive(Debug, Clone, Default /* Serialize, Deserialize */)]
pub struct AppStructure {
pub(crate) app_map: FxHashMap<SourceRootId, AppMapData>,
pub(crate) app_map: FxHashMap<SourceRootId, (Option<AppData>, Option<ApplicableFiles>)>,
pub(crate) project_map: FxHashMap<ProjectId, ProjectData>,
pub(crate) catch_all_source_root: SourceRootId,
}
#[derive(Debug, Clone, Default)]
pub struct AppMapData {
app_data: Option<AppData>, // TODO: should this be Arc?
applicable_files: Option<FxHashSet<AbsPathBuf>>,
gen_src_files: Option<FxHashSet<AbsPathBuf>>,
}
pub struct ApplyOutput {
pub unresolved_app_id_paths: FxHashMap<AbsPathBuf, AppDataId>,
pub gen_src_inputs: FxHashMap<AbsPathBuf, AppDataId>,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
pub struct AppDataId(pub u32);
@ -204,20 +189,13 @@ impl AppStructure {
&mut self,
source_root_id: SourceRootId,
app_data: Option<AppData>,
applicable_files: Option<FxHashSet<AbsPathBuf>>,
gen_src_files: Option<FxHashSet<AbsPathBuf>>,
applicable_files: Option<ApplicableFiles>,
) {
let prev = self.app_map.insert(
source_root_id,
AppMapData {
app_data,
applicable_files,
gen_src_files,
},
);
let prev = self
.app_map
.insert(source_root_id, (app_data, applicable_files));
assert!(prev.is_none());
}
pub fn add_project_data(&mut self, project_id: ProjectId, project_data: ProjectData) {
let prev = self.project_map.insert(project_id, project_data);
assert!(prev.is_none());
@ -228,16 +206,15 @@ impl AppStructure {
self,
db: &mut dyn SourceDatabaseExt,
resolve_file_id: &impl Fn(&AbsPathBuf) -> Option<FileId>,
) -> ApplyOutput {
) -> FxHashMap<AbsPathBuf, AppDataId> {
let mut app_index = AppDataIndex::default();
let mut app_data_id = AppDataId(0);
let mut unresolved_paths = FxHashMap::default();
let mut gen_src_inputs = FxHashMap::default();
for (source_root_id, app_map_data) in self.app_map {
let arc_data = app_map_data.app_data.map(Arc::new);
for (source_root_id, (data, applicable_files)) in self.app_map {
let arc_data = data.map(Arc::new);
db.set_app_data_by_id(app_data_id, arc_data);
db.set_app_data_id(source_root_id, app_data_id);
if let Some(files) = app_map_data.applicable_files {
if let Some(files) = applicable_files {
files.iter().for_each(|path| {
if let Some(file_id) = resolve_file_id(path) {
app_index.map.insert(file_id, app_data_id);
@ -246,11 +223,6 @@ impl AppStructure {
}
})
}
if let Some(files) = app_map_data.gen_src_files {
for file in files {
gen_src_inputs.insert(file.clone(), app_data_id);
}
}
app_data_id = AppDataId(app_data_id.0 + 1);
}
for (project_id, project_data) in self.project_map {
@ -259,10 +231,7 @@ impl AppStructure {
db.set_app_index(Arc::new(app_index));
db.set_catch_all_source_root(self.catch_all_source_root);
ApplyOutput {
unresolved_app_id_paths: unresolved_paths,
gen_src_inputs,
}
unresolved_paths
}
}
@ -396,7 +365,6 @@ impl<'a> ProjectApps<'a> {
let input_data = AppData {
project_id,
name: app.name.clone(),
buck_target_name: app.buck_target_name.clone(),
dir: app.dir.clone(),
include_dirs: app.include_dirs.clone(),
include_path: app.include_path.clone(),
@ -408,12 +376,7 @@ impl<'a> ProjectApps<'a> {
ebin_path: app.ebin.clone(),
is_test_target: app.is_test_target,
};
app_structure.add_app_data(
root_id,
Some(input_data),
app.applicable_files.clone(),
app.gen_src_files.clone(),
);
app_structure.add_app_data(root_id, Some(input_data), app.applicable_files.clone());
}
let mut app_roots = project_root_map.remove(&project_id).unwrap_or_default();
@ -429,14 +392,14 @@ impl<'a> ProjectApps<'a> {
otp_project_id: self.otp_project_id,
app_roots,
eqwalizer_config: project.eqwalizer_config.clone(),
include_mapping: project.include_mapping.clone(),
include_mapping: Some(project.include_mapping.clone()),
};
app_structure.add_project_data(project_id, project_data);
}
// Final SourceRoot for out-of-project files
log::info!("Final source root: {:?}", SourceRootId(app_idx));
app_structure.add_app_data(SourceRootId(app_idx), None, None, None);
app_structure.add_app_data(SourceRootId(app_idx), None, None);
app_structure.catch_all_source_root = SourceRootId(app_idx);
app_structure
}

View file

@ -13,7 +13,6 @@ use std::sync::Arc;
use elp_project_model::AppName;
use elp_project_model::buck::IncludeMapping;
use elp_project_model::buck::IncludeMappingScope;
use elp_syntax::AstNode;
use elp_syntax::Parse;
use elp_syntax::SmolStr;
@ -32,7 +31,7 @@ mod module_index;
// Public API
pub mod fixture;
// @fb-only: mod meta_only;
// @fb-only
pub mod test_utils;
pub use change::Change;
pub use elp_project_model::AppType;
@ -178,12 +177,7 @@ pub trait SourceDatabase: FileLoader + salsa::Database {
fn include_file_id(&self, project_id: ProjectId, path: VfsPath) -> Option<FileId>;
fn mapped_include_file(
&self,
project_id: ProjectId,
scope: IncludeMappingScope,
path: SmolStr,
) -> Option<FileId>;
fn mapped_include_file(&self, project_id: ProjectId, path: SmolStr) -> Option<FileId>;
#[salsa::input]
fn project_data(&self, id: ProjectId) -> Arc<ProjectData>;
@ -230,12 +224,7 @@ pub trait SourceDatabase: FileLoader + salsa::Database {
fn resolve_local(&self, file_id: FileId, path: SmolStr) -> Option<FileId>;
#[salsa::invoke(IncludeCtx::resolve_remote_query)]
fn resolve_remote(
&self,
orig_file_id: Option<FileId>,
current_file_id: FileId,
path: SmolStr,
) -> Option<FileId>;
fn resolve_remote(&self, file_id: FileId, path: SmolStr) -> Option<FileId>;
}
fn app_data(db: &dyn SourceDatabase, id: SourceRootId) -> Option<Arc<AppData>> {
@ -264,13 +253,16 @@ fn module_index(db: &dyn SourceDatabase, project_id: ProjectId) -> Arc<ModuleInd
for &source_root_id in &project_data.source_roots {
let source_root = db.source_root(source_root_id);
for file_id in source_root.iter() {
if db.file_kind(file_id).is_module()
&& let Some(app_data) = db.file_app_data(file_id)
&& let Some((file_id, file_source, path)) =
source_root.file_info(file_id, &app_data)
&& let Some((name, Some("erl"))) = path.name_and_extension()
{
builder.insert(file_id, file_source, ModuleName::new(name));
if db.file_kind(file_id).is_module() {
if let Some(app_data) = db.file_app_data(file_id) {
if let Some((file_id, file_source, path)) =
source_root.file_info(file_id, &app_data)
{
if let Some((name, Some("erl"))) = path.name_and_extension() {
builder.insert(file_id, file_source, ModuleName::new(name));
}
}
}
}
}
}
@ -347,7 +339,7 @@ fn build_include_file_index(
include_file_index.add(path, file_id);
}
} else {
log::warn!("No file path for {file_id:?}");
log::warn!("No file path for {:?}", file_id);
}
}
}
@ -365,11 +357,10 @@ fn include_file_id(
fn mapped_include_file(
db: &dyn SourceDatabase,
project_id: ProjectId,
scope: IncludeMappingScope,
path: SmolStr,
) -> Option<FileId> {
let include_file_index = db.include_file_index(project_id);
let file_path = include_file_index.include_mapping.get(scope, &path)?;
let file_path = include_file_index.include_mapping.get(&path)?;
include_file_index
.path_to_file_id
.get(&VfsPath::from(file_path.clone()))
@ -430,7 +421,7 @@ fn is_otp(db: &dyn SourceDatabase, file_id: FileId) -> Option<bool> {
fn is_test_suite_or_test_helper(db: &dyn SourceDatabase, file_id: FileId) -> Option<bool> {
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nis_test_suite_or_test_helper: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nis_test_suite_or_test_helper: {:?}", file_id));
let app_data = db.file_app_data(file_id)?;
let root_id = db.file_source_root(file_id);
let root = db.source_root(root_id);
@ -444,28 +435,28 @@ fn is_test_suite_or_test_helper(db: &dyn SourceDatabase, file_id: FileId) -> Opt
fn file_app_type(db: &dyn SourceDatabase, file_id: FileId) -> Option<AppType> {
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nfile_app_type: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nfile_app_type: {:?}", file_id));
let app_data = db.file_app_data(file_id)?;
Some(app_data.app_type)
}
fn file_app_name(db: &dyn SourceDatabase, file_id: FileId) -> Option<AppName> {
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nfile_app_name: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nfile_app_name: {:?}", file_id));
let app_data = db.file_app_data(file_id)?;
Some(app_data.name.clone())
}
fn file_project_id(db: &dyn SourceDatabase, file_id: FileId) -> Option<ProjectId> {
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nfile_project_id: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nfile_project_id: {:?}", file_id));
let app_data = db.file_app_data(file_id)?;
Some(app_data.project_id)
}
pub fn module_name(db: &dyn SourceDatabase, file_id: FileId) -> Option<ModuleName> {
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nmodule_name: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nmodule_name: {:?}", file_id));
let app_data = db.file_app_data(file_id)?;
let module_index = db.module_index(app_data.project_id);
module_index.module_for_file(file_id).cloned()
@ -476,7 +467,7 @@ static ref IGNORED_SOURCES: Vec<Regex> = {
let regexes: Vec<Vec<Regex>> = vec![
vec![Regex::new(r"^.*_SUITE_data/.+$").unwrap()],
//ignore sources goes here
// @fb-only: meta_only::ignored_sources_regexes()
// @fb-only
];
regexes.into_iter().flatten().collect::<Vec<Regex>>()
};
@ -484,7 +475,7 @@ static ref IGNORED_SOURCES: Vec<Regex> = {
fn file_kind(db: &dyn SourceDatabase, file_id: FileId) -> FileKind {
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nfile_kind: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nfile_kind: {:?}", file_id));
let source_root_id = db.file_source_root(file_id);
let source_root = db.source_root(source_root_id);
let ignored_path = source_root
@ -496,7 +487,7 @@ fn file_kind(db: &dyn SourceDatabase, file_id: FileId) -> FileKind {
})
.unwrap_or(false);
// Context for T171541590
let _ = stdx::panic_context::enter(format!("\nfile_kind: {file_id:?}"));
let _ = stdx::panic_context::enter(format!("\nfile_kind: {:?}", file_id));
if ignored_path {
// not part of the known project model, and on list of ignored
// sources, do not process
@ -559,7 +550,7 @@ impl<T: SourceDatabaseExt> FileLoader for FileLoaderDelegate<&'_ T> {
/// If the `input` string represents an atom, and needs quoting, quote
/// it.
pub fn to_quoted_string(input: &str) -> Cow<'_, str> {
pub fn to_quoted_string(input: &str) -> Cow<str> {
fn is_valid_atom(input: &str) -> bool {
let mut chars = input.chars();
chars.next().is_some_and(|c| c.is_lowercase())

View file

@ -35,7 +35,7 @@ impl ModuleName {
self
}
pub fn to_quoted_string(&self) -> Cow<'_, str> {
pub fn to_quoted_string(&self) -> Cow<str> {
to_quoted_string(self.as_str())
}
}

View file

@ -22,10 +22,10 @@ pub use dissimilar::diff as __diff;
/// `eprintln!()` macro in case of text inequality.
#[macro_export]
macro_rules! assert_eq_text {
($left:expr, $right:expr) => {
($left:expr_2021, $right:expr_2021) => {
assert_eq_text!($left, $right,)
};
($left:expr, $right:expr, $($tt:tt)*) => {{
($left:expr_2021, $right:expr_2021, $($tt:tt)*) => {{
let left = $left;
let right = $right;
if left != right {
@ -46,8 +46,8 @@ pub fn format_diff(chunks: Vec<dissimilar::Chunk>) -> String {
for chunk in chunks {
let formatted = match chunk {
dissimilar::Chunk::Equal(text) => text.into(),
dissimilar::Chunk::Delete(text) => format!("\x1b[41m{text}\x1b[0m"),
dissimilar::Chunk::Insert(text) => format!("\x1b[42m{text}\x1b[0m"),
dissimilar::Chunk::Delete(text) => format!("\x1b[41m{}\x1b[0m", text),
dissimilar::Chunk::Insert(text) => format!("\x1b[42m{}\x1b[0m", text),
};
buf.push_str(&formatted);
}

View file

@ -18,10 +18,10 @@ workspace = true
[dependencies]
elp_eqwalizer.workspace = true
elp_ide.workspace = true
elp_ide_db.workspace = true
elp_log.workspace = true
elp_project_model.workspace = true
elp_syntax.workspace = true
elp_text_edit.workspace = true
hir.workspace = true
always-assert.workspace = true

View file

@ -33,7 +33,7 @@ fn main() {
OffsetDateTime::from_unix_timestamp(timestamp).expect("parsing SOURCE_DATE_EPOCH")
}
Err(std::env::VarError::NotPresent) => OffsetDateTime::now_utc(),
Err(e) => panic!("Error getting SOURCE_DATE_EPOCH: {e}"),
Err(e) => panic!("Error getting SOURCE_DATE_EPOCH: {}", e),
};
date.format(&date_format).expect("formatting date")
} else {
@ -43,14 +43,20 @@ fn main() {
let cargo_manifest_dir = env::var(CARGO_MANIFEST_DIR)
.expect("CARGO_MANIFEST_DIR should be set automatically by cargo");
let eqwalizer_support_dir = match eqwalizer_dir {
Ok(eqwalizer_support_dir) => format!("{eqwalizer_support_dir}/../eqwalizer_support"),
Err(_) => format!("{cargo_manifest_dir}/../../../eqwalizer/eqwalizer_support"),
Ok(eqwalizer_support_dir) => format!("{}/../eqwalizer_support", eqwalizer_support_dir),
Err(_) => format!(
"{}/../../../eqwalizer/eqwalizer_support",
cargo_manifest_dir
),
};
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed={SOURCE_DATE_EPOCH}");
println!("cargo:rerun-if-env-changed={CI}");
println!("cargo:rustc-env=BUILD_ID={build_id}");
println!("cargo:rustc-env={EQWALIZER_SUPPORT_DIR}={eqwalizer_support_dir}");
println!("cargo:rerun-if-env-changed={EQWALIZER_DIR}");
println!("cargo:rerun-if-env-changed={}", SOURCE_DATE_EPOCH);
println!("cargo:rerun-if-env-changed={}", CI);
println!("cargo:rustc-env=BUILD_ID={}", build_id);
println!(
"cargo:rustc-env={}={}",
EQWALIZER_SUPPORT_DIR, eqwalizer_support_dir
);
println!("cargo:rerun-if-env-changed={}", EQWALIZER_DIR);
}

View file

@ -8,14 +8,13 @@
* above-listed licenses.
*/
// @fb-only: /// Types as defined in https://www.internalfb.com/intern/wiki/Linting/adding-linters/#flow-type
// @fb-only: /// and https://www.internalfb.com/code/fbsource/[1238f73dac0efd4009443fee6a345a680dc9401b]/whatsapp/server/erl/tools/lint/arcanist.py?lines=17
/// Types as defined in https://www.internalfb.com/intern/wiki/Linting/adding-linters/#flow-type
/// and https://www.internalfb.com/code/fbsource/[1238f73dac0efd4009443fee6a345a680dc9401b]/whatsapp/server/erl/tools/lint/arcanist.py?lines=17 /
use std::path::Path;
use serde::Serialize;
#[derive(Debug, Serialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct Diagnostic {
// Filepath
path: String,
@ -30,7 +29,6 @@ pub struct Diagnostic {
original: Option<String>,
replacement: Option<String>,
description: Option<String>,
doc_path: Option<String>,
}
#[derive(Debug, Serialize, PartialEq, Eq)]
@ -44,7 +42,6 @@ pub enum Severity {
}
impl Diagnostic {
#[allow(clippy::too_many_arguments)]
pub fn new(
path: &Path,
line: u32,
@ -53,7 +50,6 @@ impl Diagnostic {
name: String,
description: String,
original: Option<String>,
doc_path: Option<String>,
) -> Self {
Diagnostic {
path: path.display().to_string(), // lossy on Windows for unicode paths
@ -65,7 +61,6 @@ impl Diagnostic {
original,
replacement: None,
description: Some(description),
doc_path,
}
}
}

View file

@ -11,20 +11,13 @@
use std::cmp::Ordering;
use std::env;
use std::fs;
use std::io::IsTerminal;
use std::path::PathBuf;
use anyhow::Result;
use anyhow::bail;
use bpaf::Bpaf;
use bpaf::Parser;
use bpaf::construct;
use bpaf::long;
use elp_ide::elp_ide_db::DiagnosticCode;
use elp_project_model::buck::BuckQueryConfig;
use hir::fold::MacroStrategy;
use hir::fold::ParenStrategy;
use hir::fold::Strategy;
use itertools::Itertools;
use serde::Deserialize;
@ -69,20 +62,6 @@ pub struct ParseAllElp {
guard(format_guard, "Please use json")
)]
pub format: Option<String>,
/// Report system memory usage and other statistics
#[bpaf(long("report-system-stats"))]
pub report_system_stats: bool,
/// Minimum severity level to report. Valid values: error, warning, weak_warning, information
#[bpaf(
argument("SEVERITY"),
complete(severity_completer),
fallback(None),
guard(
severity_guard,
"Please use error, warning, weak_warning, or information"
)
)]
pub severity: Option<String>,
}
#[derive(Clone, Debug, Bpaf)]
@ -155,6 +134,8 @@ pub struct EqwalizeAll {
/// Also eqwalize opted-in generated modules from project (deprecated)
#[bpaf(hide)]
pub include_generated: bool,
/// Also eqwalize test modules from project
pub include_tests: bool,
/// Exit with a non-zero status code if any errors are found
pub bail_on_error: bool,
/// Print statistics when done
@ -171,6 +152,8 @@ pub struct EqwalizeTarget {
/// Also eqwalize opted-in generated modules from application (deprecated)
#[bpaf(hide)]
pub include_generated: bool,
/// Also eqwalize test modules from project
pub include_tests: bool,
/// Exit with a non-zero status code if any errors are found
pub bail_on_error: bool,
/// target, like //erl/chatd/...
@ -189,6 +172,8 @@ pub struct EqwalizeApp {
/// Also eqwalize opted-in generated modules from project (deprecated)
#[bpaf(hide)]
pub include_generated: bool,
/// Also eqwalize test modules from project
pub include_tests: bool,
/// Run with rebar
pub rebar: bool,
/// Exit with a non-zero status code if any errors are found
@ -211,6 +196,8 @@ pub struct EqwalizeStats {
/// Also eqwalize opted-in generated modules from project (deprecated)
#[bpaf(hide)]
pub include_generated: bool,
/// Also eqwalize test modules from project
pub include_tests: bool,
/// If specified, use the provided CLI severity mapping instead of the default one
pub use_cli_severity: bool,
}
@ -250,7 +237,7 @@ pub struct Lint {
#[bpaf(argument("MODULE"))]
pub module: Option<String>,
/// Parse a single application from the project, not the entire project.
#[bpaf(long("app"), long("application"), argument("APP"))]
#[bpaf(argument("APP"))]
pub app: Option<String>,
/// Parse a single file from the project, not the entire project. This can be an include file or escript, etc.
#[bpaf(argument("FILE"))]
@ -278,6 +265,8 @@ pub struct Lint {
guard(format_guard, "Please use json")
)]
pub format: Option<String>,
/// Optional prefix to prepend to each diagnostic file path. Only used when --format=json is set
pub prefix: Option<String>,
/// Include diagnostics produced by erlc
pub include_erlc_diagnostics: bool,
@ -296,7 +285,7 @@ pub struct Lint {
#[bpaf(argument("CODE"))]
pub diagnostic_ignore: Option<String>,
/// Filter out all reported diagnostics except this one, by code or label
#[bpaf(argument("CODE"), complete(diagnostic_code_completer))]
#[bpaf(argument("CODE"))]
pub diagnostic_filter: Option<String>,
#[bpaf(external(parse_experimental_diags))]
pub experimental_diags: bool,
@ -330,106 +319,11 @@ pub struct Lint {
/// than one at a time.
pub one_shot: bool,
/// Report system memory usage and other statistics
#[bpaf(long("report-system-stats"))]
pub report_system_stats: bool,
/// Disable streaming of diagnostics when applying fixes (collect all before printing)
pub no_stream: bool,
/// Rest of args are space separated list of apps to ignore
#[bpaf(positional("IGNORED_APPS"))]
pub ignore_apps: Vec<String>,
}
#[derive(Clone, Debug, Bpaf)]
pub struct Ssr {
/// Path to directory with project, or to a JSON file (defaults to `.`)
#[bpaf(argument("PROJECT"), fallback(PathBuf::from(".")))]
pub project: PathBuf,
/// Parse a single module from the project, not the entire project.
#[bpaf(argument("MODULE"))]
pub module: Option<String>,
/// Parse a single application from the project, not the entire project.
#[bpaf(long("app"), long("application"), argument("APP"))]
pub app: Option<String>,
/// Parse a single file from the project, not the entire project. This can be an include file or escript, etc.
#[bpaf(argument("FILE"))]
pub file: Option<String>,
/// Run with rebar
pub rebar: bool,
/// Rebar3 profile to pickup (default is test)
#[bpaf(long("as"), argument("PROFILE"), fallback("test".to_string()))]
pub profile: String,
/// Also generate diagnostics for generated files
pub include_generated: bool,
/// Also generate diagnostics for test files
pub include_tests: bool,
/// Show diagnostics in JSON format
#[bpaf(
argument("FORMAT"),
complete(format_completer),
fallback(None),
guard(format_guard, "Please use json")
)]
pub format: Option<String>,
/// Macro expansion strategy: expand | no-expand | visible-expand (default expand)
#[bpaf(
long("macros"),
argument("STRATEGY"),
complete(macros_completer),
fallback(None),
guard(macros_guard, "Please supply a valid macro expansion value")
)]
pub macro_strategy: Option<String>,
/// Explicitly match parentheses. If omitted, they are ignored.
#[bpaf(long("parens"))]
pub paren_strategy: bool,
/// Dump a configuration snippet that can be put in .elp_lint.toml to match the given SSR patterns
pub dump_config: bool,
/// Show source code context for matches
#[bpaf(long("show-source"))]
pub show_source: bool,
/// Print NUM lines of leading context, enables --show-source
#[bpaf(short('B'), long("before-context"), argument("NUM"))]
pub before_context: Option<usize>,
/// Print NUM lines of trailing context, enables --show-source
#[bpaf(short('A'), long("after-context"), argument("NUM"))]
pub after_context: Option<usize>,
/// Print NUM lines of output context, enables --show-source
#[bpaf(short('C'), long("context"), argument("NUM"))]
pub context: Option<usize>,
/// Print SEP on line between matches with context, enables --show-source
#[bpaf(long("group-separator"), argument("SEP"))]
pub group_separator: Option<String>,
/// Do not print separator for matches with context, enables --show-source
#[bpaf(long("no-group-separator"))]
pub no_group_separator: bool,
/// Report system memory usage and other statistics
#[bpaf(long("report-system-stats"))]
pub report_system_stats: bool,
/// SSR specs to use
#[bpaf(
positional("SSR_SPECS"),
guard(at_least_1, "there should be at least one spec")
)]
pub ssr_specs: Vec<String>,
}
#[derive(Clone, Debug, Bpaf)]
pub struct Explain {
/// Error code to explain
@ -454,8 +348,6 @@ pub struct ProjectInfo {
pub to: Option<PathBuf>,
/// Include the buck uquery results in the output
pub buck_query: bool,
/// Dump a list of targets and their types
pub target_types: bool,
}
#[derive(Clone, Debug, Bpaf)]
@ -474,6 +366,8 @@ pub struct Glean {
pub pretty: bool,
/// Output each fact separately
pub multi: bool,
/// Optional prefix to prepend to each fact
pub prefix: Option<String>,
}
#[derive(Clone, Debug, Bpaf)]
@ -493,7 +387,6 @@ pub enum Command {
GenerateCompletions(GenerateCompletions),
RunServer(RunServer),
Lint(Lint),
Ssr(Ssr),
Version(Version),
Shell(Shell),
Explain(Explain),
@ -514,50 +407,25 @@ pub struct Args {
pub escript: Option<PathBuf>,
pub no_log_buffering: bool,
/// When using buck, invoke a build step for generated files.
#[allow(dead_code)] // Until T208401551 done
pub buck_generated: bool,
/// When using buck, do not invoke a build step for generated files.
pub no_buck_generated: bool,
/// Use buck2 targets for first stage project loading
pub buck_quick_start: bool,
/// Use color in output; WHEN is 'always', 'never', or 'auto'
#[bpaf(
long("color"),
long("colour"),
argument("WHEN"),
fallback(Some("always".to_string())),
guard(color_guard, "Please use always, never, or auto")
)]
pub color: Option<String>,
#[bpaf(external(command))]
pub command: Command,
}
impl Args {
pub fn query_config(&self) -> BuckQueryConfig {
if self.buck_quick_start {
BuckQueryConfig::BuckTargetsOnly
} else if self.no_buck_generated {
if self.no_buck_generated {
BuckQueryConfig::NoBuildGeneratedCode
} else {
BuckQueryConfig::BuildGeneratedCode
}
}
/// Determine if color should be used based on the --color argument
pub fn should_use_color(&self) -> bool {
match self.color.as_deref() {
Some("always") => true,
Some("never") => false,
Some("auto") | None => {
// Check NO_COLOR environment variable - if set (regardless of value), disable color
// Also check if stdout is connected to a TTY
env::var("NO_COLOR").is_err() && std::io::stdout().is_terminal()
}
_ => false, // Should be caught by the guard, but handle anyway
}
}
}
pub fn command() -> impl Parser<Command> {
@ -601,8 +469,7 @@ pub fn command() -> impl Parser<Command> {
.map(Command::EqwalizeStats)
.to_options()
.command("eqwalize-stats")
.help("Return statistics about code quality for eqWAlizer")
.hide();
.help("Return statistics about code quality for eqWAlizer");
let dialyze_all = dialyze_all()
.map(Command::DialyzeAll)
@ -629,18 +496,6 @@ pub fn command() -> impl Parser<Command> {
.command("lint")
.help("Parse files in project and emit diagnostics, optionally apply fixes.");
let search = ssr()
.map(Command::Ssr)
.to_options()
.command("search")
.help("Alias for 'ssr': Run SSR (Structural Search and Replace) pattern matching on project files.");
let ssr = ssr()
.map(Command::Ssr)
.to_options()
.command("ssr")
.help("Run SSR (Structural Search and Replace) pattern matching on project files.");
let run_server = run_server()
.map(Command::RunServer)
.to_options()
@ -684,26 +539,23 @@ pub fn command() -> impl Parser<Command> {
.help("Dump a JSON config stanza suitable for use in VS Code project.json");
construct!([
// Note: The order here is what is used for `elp --help` output
version,
run_server,
shell,
eqwalize,
eqwalize_all,
eqwalize_app,
eqwalize_target,
eqwalize_stats,
dialyze_all,
lint,
ssr,
search,
run_server,
generate_completions,
parse_all,
parse_elp,
explain,
build_info,
version,
shell,
eqwalize_stats,
explain,
project_info,
glean,
generate_completions,
config_stanza,
])
.fallback(Help())
@ -740,11 +592,11 @@ fn module_completer(input: &String) -> Vec<(String, Option<String>)> {
potential_path = path.parent();
continue;
} else {
if let Ok(content) = fs::read_to_string(file_path)
&& let Ok(config) = toml::from_str::<ModuleConfig>(&content)
{
for module_name in config.modules.into_iter() {
modules.push(module_name)
if let Ok(content) = fs::read_to_string(file_path) {
if let Ok(config) = toml::from_str::<ModuleConfig>(&content) {
for module_name in config.modules.into_iter() {
modules.push(module_name)
}
}
}
break;
@ -753,27 +605,6 @@ fn module_completer(input: &String) -> Vec<(String, Option<String>)> {
get_suggesions(input, modules)
}
fn diagnostic_code_completer(input: &Option<String>) -> Vec<(String, Option<String>)> {
let codes: Vec<String> = DiagnosticCode::codes_iter()
.filter(|code| match code {
DiagnosticCode::DefaultCodeForEnumIter
| DiagnosticCode::ErlangService(_)
| DiagnosticCode::Eqwalizer(_)
| DiagnosticCode::AdHoc(_) => false,
_ => true,
})
.flat_map(|code| vec![code.as_code().to_string(), code.as_label().to_string()])
.collect();
codes
.into_iter()
.filter(|code| match input {
None => true,
Some(prefix) => code.starts_with(prefix),
})
.map(|c| (c.to_string(), None))
.collect::<Vec<_>>()
}
fn format_completer(_: &Option<String>) -> Vec<(String, Option<String>)> {
vec![("json".to_string(), None)]
}
@ -786,48 +617,6 @@ fn format_guard(format: &Option<String>) -> bool {
}
}
fn severity_completer(_: &Option<String>) -> Vec<(String, Option<String>)> {
vec![
("error".to_string(), None),
("warning".to_string(), None),
("weak_warning".to_string(), None),
("information".to_string(), None),
]
}
fn severity_guard(severity: &Option<String>) -> bool {
match severity {
None => true,
Some(s) if s == "error" || s == "warning" || s == "weak_warning" || s == "information" => {
true
}
_ => false,
}
}
fn macros_completer(_: &Option<String>) -> Vec<(String, Option<String>)> {
vec![
("expand".to_string(), None),
("no-expand".to_string(), None),
("visible-expand".to_string(), None),
]
}
fn macros_guard(format: &Option<String>) -> bool {
match format {
None => true,
Some(_) => parse_macro_strategy(format).is_ok(),
}
}
fn color_guard(color: &Option<String>) -> bool {
match color {
None => true,
Some(c) if c == "always" || c == "never" || c == "auto" => true,
_ => false,
}
}
#[allow(clippy::ptr_arg)] // This is needed in the BPAF macros
fn at_least_1(data: &Vec<String>) -> bool {
!data.is_empty()
@ -908,44 +697,6 @@ impl Lint {
pub fn is_format_json(&self) -> bool {
self.format == Some("json".to_string())
}
/// To prevent flaky test results we allow disabling streaming when applying fixes
pub fn skip_stream_print(&self) -> bool {
self.apply_fix || self.no_stream
}
}
fn parse_macro_strategy(macro_strategy: &Option<String>) -> Result<MacroStrategy> {
match macro_strategy.as_deref() {
Some("no-expand") => Ok(MacroStrategy::DoNotExpand),
Some("expand") => Ok(MacroStrategy::Expand),
Some("visible-expand") => Ok(MacroStrategy::ExpandButIncludeMacroCall),
None => Ok(MacroStrategy::Expand),
Some(s) => bail!(
"Invalid macro strategy '{}'. Valid options are: expand, no-expand, visible-expand",
s
),
}
}
impl Ssr {
pub fn is_format_normal(&self) -> bool {
self.format.is_none()
}
pub fn is_format_json(&self) -> bool {
self.format == Some("json".to_string())
}
pub fn parse_strategy(&self) -> Result<Strategy> {
let macros = parse_macro_strategy(&self.macro_strategy)?;
let parens = if self.paren_strategy {
ParenStrategy::VisibleParens
} else {
ParenStrategy::InvisibleParens
};
Ok(Strategy { macros, parens })
}
}
impl ParseAllElp {

View file

@ -15,18 +15,15 @@ use std::io::Write;
use anyhow::Result;
use elp_ide::elp_ide_db::elp_base_db::AbsPath;
use elp_ide::elp_ide_db::elp_base_db::AbsPathBuf;
use elp_project_model::AppType;
use elp_project_model::ElpConfig;
use elp_project_model::EqwalizerConfig;
use elp_project_model::IncludeParentDirs;
use elp_project_model::Project;
use elp_project_model::ProjectAppData;
use elp_project_model::ProjectBuildData;
use elp_project_model::ProjectManifest;
use elp_project_model::buck::BuckQueryConfig;
use elp_project_model::buck::BuckTarget;
use elp_project_model::buck::query_buck_targets;
use elp_project_model::buck::query_buck_targets_bxl;
use elp_project_model::json::JsonConfig;
use fxhash::FxHashMap;
use crate::args::BuildInfo;
use crate::args::ProjectInfo;
@ -34,8 +31,8 @@ use crate::args::ProjectInfo;
pub(crate) fn save_build_info(args: BuildInfo, query_config: &BuckQueryConfig) -> Result<()> {
let root = fs::canonicalize(&args.project)?;
let root = AbsPathBuf::assert_utf8(root);
let (elp_config, manifest) = ProjectManifest::discover(&root)?;
let project = Project::load(&manifest, &elp_config, query_config, &|_| {})?;
let (_elp_config, manifest) = ProjectManifest::discover(&root)?;
let project = Project::load(&manifest, EqwalizerConfig::default(), query_config, &|_| {})?;
let mut writer = File::create(&args.to)?;
let json_str = serde_json::to_string_pretty::<JsonConfig>(&project.as_json(root))?;
writer.write_all(json_str.as_bytes())?;
@ -66,71 +63,28 @@ pub(crate) fn save_project_info(args: ProjectInfo, query_config: &BuckQueryConfi
}
};
if args.buck_query
&& let ProjectBuildData::Buck(buck) = &project.project_build_data
{
let buck_targets_query = query_buck_targets(&buck.buck_conf, query_config);
if let Ok(targets) = &buck_targets_query {
writer.write_all(format!("{:#?}\n", sort_buck_targets(targets)).as_bytes())?;
} else {
if args.buck_query {
if let ProjectBuildData::Buck(buck) = &project.project_build_data {
let buck_targets_query = query_buck_targets_bxl(&buck.buck_conf, query_config);
writer.write_all(b"================buck targets query raw================\n")?;
writer.write_all(format!("{:#?}\n", &buck_targets_query).as_bytes())?;
}
} else if args.target_types {
writer.write_all(b"================target types================\n")?;
for line in buck_targets_and_types(&project.project_apps) {
writer.write_all(format!("{}\n", line).as_bytes())?;
}
} else {
writer.write_all(b"================manifest================\n")?;
writer.write_all(format!("{:#?}\n", &manifest).as_bytes())?;
writer.write_all(b"================project_build_data================\n")?;
writer.write_all(format!("{:#?}\n", &project.project_build_data).as_bytes())?;
writer.write_all(b"================project_app_data================\n")?;
writer.write_all(format!("{:#?}\n", &project.project_apps).as_bytes())?;
};
}
writer.write_all(b"================manifest================\n")?;
writer.write_all(format!("{:#?}\n", &manifest).as_bytes())?;
writer.write_all(b"================project_build_data================\n")?;
writer.write_all(format!("{:#?}\n", &project.project_build_data).as_bytes())?;
writer.write_all(b"================project_app_data================\n")?;
writer.write_all(format!("{:#?}\n", &project.project_apps).as_bytes())?;
Ok(())
}
fn sort_buck_targets(hash_map: &FxHashMap<String, BuckTarget>) -> Vec<(String, &BuckTarget)> {
let mut vec = hash_map
.iter()
.map(|(n, t)| (format!("target_name:{}", n), t))
.collect::<Vec<_>>();
vec.sort_by(|a, b| a.0.cmp(&b.0));
vec
}
fn buck_targets_and_types(apps: &[ProjectAppData]) -> Vec<String> {
let tn = |tn| -> String {
if let Some(tn) = tn {
tn
} else {
"".to_string()
}
};
let mut vec = apps
.iter()
.filter(|app| app.app_type != AppType::Otp)
.filter(|app| app.is_buck_generated != Some(true))
.map(|app| {
format!(
"{:?} {:<30} {}",
app.app_type,
app.name,
tn(app.buck_target_name.clone())
)
})
.collect::<Vec<_>>();
vec.sort();
vec
}
fn load_project(
root: &AbsPath,
query_config: &BuckQueryConfig,
) -> Result<(ProjectManifest, Project)> {
let (elp_config, manifest) = ProjectManifest::discover(root)?;
let project = Project::load(&manifest, &elp_config, query_config, &|_| {})?;
let project = Project::load(&manifest, elp_config.eqwalizer, query_config, &|_| {})?;
Ok((manifest, project))
}
@ -140,6 +94,6 @@ fn load_fallback(
) -> Result<(ProjectManifest, Project)> {
let manifest = ProjectManifest::discover_no_manifest(root, IncludeParentDirs::Yes);
let elp_config = ElpConfig::default();
let project = Project::load(&manifest, &elp_config, query_config, &|_| {})?;
let project = Project::load(&manifest, elp_config.eqwalizer, query_config, &|_| {})?;
Ok((manifest, project))
}

View file

@ -16,5 +16,5 @@ use crate::args::ConfigStanza;
pub fn config_stanza(_args: &ConfigStanza, cli: &mut dyn Cli) -> Result<()> {
let schema = format!("{:#}", Config::json_schema());
Ok(writeln!(cli, "{schema}")?)
Ok(writeln!(cli, "{}", schema)?)
}

View file

@ -14,7 +14,6 @@ use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::str;
use std::time::SystemTime;
use anyhow::Result;
use anyhow::bail;
@ -22,8 +21,8 @@ use elp::build::load;
use elp::build::types::LoadResult;
use elp::cli::Cli;
use elp::convert;
use elp::memory_usage::MemoryUsage;
use elp::otp_file_to_ignore;
use elp::server::file_id_to_url;
use elp_eqwalizer::Mode;
use elp_ide::Analysis;
use elp_ide::diagnostics;
@ -40,7 +39,6 @@ use elp_ide::elp_ide_db::elp_base_db::IncludeOtp;
use elp_ide::elp_ide_db::elp_base_db::ModuleName;
use elp_ide::elp_ide_db::elp_base_db::Vfs;
use elp_ide::elp_ide_db::elp_base_db::VfsPath;
use elp_log::telemetry;
use elp_project_model::AppType;
use elp_project_model::DiscoverConfig;
use elp_project_model::buck::BuckQueryConfig;
@ -55,36 +53,6 @@ use vfs::AbsPath;
use crate::args::ParseAllElp;
use crate::reporting;
use crate::reporting::print_memory_usage;
fn parse_severity(severity: &str) -> Option<diagnostics::Severity> {
match severity {
"error" => Some(diagnostics::Severity::Error),
"warning" => Some(diagnostics::Severity::Warning),
"weak_warning" => Some(diagnostics::Severity::WeakWarning),
"information" => Some(diagnostics::Severity::Information),
_ => None,
}
}
fn severity_rank(severity: diagnostics::Severity) -> u8 {
match severity {
diagnostics::Severity::Error => 1,
diagnostics::Severity::Warning => 2,
diagnostics::Severity::WeakWarning => 3,
diagnostics::Severity::Information => 4,
}
}
fn meets_severity_threshold(
diag_severity: diagnostics::Severity,
min_severity: Option<diagnostics::Severity>,
) -> bool {
match min_severity {
None => true,
Some(min) => severity_rank(diag_severity) <= severity_rank(min),
}
}
#[derive(Debug)]
struct ParseResult {
@ -100,10 +68,6 @@ pub fn parse_all(
) -> Result<()> {
log::info!("Loading project at: {:?}", args.project);
let start_time = SystemTime::now();
// Track memory usage at the start
let memory_start = MemoryUsage::now();
let config = DiscoverConfig::new(args.rebar, &args.profile);
let loaded = load::load_project_at(
cli,
@ -123,7 +87,7 @@ pub fn parse_all(
let (file_id, name) = match &args.module {
Some(module) => {
if args.is_format_normal() {
writeln!(cli, "module specified: {module}")?;
writeln!(cli, "module specified: {}", module)?;
}
let file_id = analysis.module_file_id(loaded.project_id, module)?;
(file_id, analysis.module_name(file_id.unwrap())?)
@ -132,7 +96,7 @@ pub fn parse_all(
None => match &args.file {
Some(file_name) => {
if args.is_format_normal() {
writeln!(cli, "file specified: {file_name}")?;
writeln!(cli, "file specified: {}", file_name)?;
}
let path_buf = Utf8PathBuf::from_path_buf(fs::canonicalize(file_name).unwrap())
.expect("UTF8 conversion failed");
@ -160,9 +124,10 @@ pub fn parse_all(
(None, _, true) => do_parse_all_seq(cli, &loaded, &cfg, &args.to)?,
(None, _, false) => do_parse_all_par(cli, &loaded, &cfg, &args.to)?,
(Some(file_id), Some(name), _) => {
do_parse_one(&analysis, &cfg, &args.to, file_id, &name)?.map_or(vec![], |x| vec![x])
do_parse_one(&analysis, &loaded.vfs, &cfg, &args.to, file_id, &name)?
.map_or(vec![], |x| vec![x])
}
(Some(file_id), _, _) => panic!("Could not get name from file_id for {file_id:?}"),
(Some(file_id), _, _) => panic!("Could not get name from file_id for {:?}", file_id),
};
if args.dump_include_resolutions {
@ -171,32 +136,14 @@ pub fn parse_all(
let db = loaded.analysis_host.raw_database();
telemetry::report_elapsed_time("parse-elp operational", start_time);
let memory_end = MemoryUsage::now();
let memory_used = memory_end - memory_start;
let min_severity = args
.severity
.as_ref()
.and_then(|s| parse_severity(s.as_str()));
res.retain(|parse_result| {
parse_result
.diagnostics
.diagnostics_for(parse_result.file_id)
.iter()
.any(|diag| meets_severity_threshold(diag.severity, min_severity))
});
// We need a `Url` for converting to the lsp_types::Diagnostic for
// printing, but do not print it out. So just create a dummy value
let url = lsp_types::Url::parse("file:///unused_url").ok().unwrap();
if res.is_empty() {
if args.is_format_normal() {
writeln!(cli, "No errors reported")?;
}
if args.is_format_normal() && args.report_system_stats {
print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?;
writeln!(cli, "{}", memory_used)?;
}
Ok(())
} else {
if args.is_format_normal() {
@ -207,7 +154,6 @@ pub fn parse_all(
for diags in res {
let mut combined: Vec<diagnostics::Diagnostic> =
diags.diagnostics.diagnostics_for(diags.file_id);
combined.retain(|diag| meets_severity_threshold(diag.severity, min_severity));
if args.is_format_normal() {
writeln!(cli, " {}: {}", diags.name, combined.len())?;
}
@ -234,19 +180,11 @@ pub fn parse_all(
cli,
)?;
} else {
print_diagnostic(&diag, &line_index, &mut err_in_diag, cli)?;
print_diagnostic(&diag, &line_index, &url, &mut err_in_diag, cli)?;
}
}
}
}
telemetry::report_elapsed_time("parse-elp done", start_time);
if args.is_format_normal() && args.report_system_stats {
print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?;
writeln!(cli, "{}", memory_used)?;
}
if err_in_diag {
bail!("Parse failures found")
} else {
@ -270,7 +208,8 @@ fn print_diagnostic_json(
cli,
"{}",
serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| panic!(
"print_diagnostics_json failed for '{converted_diagnostic:?}': {err}"
"print_diagnostics_json failed for '{:?}': {}",
converted_diagnostic, err
))
)?;
Ok(())
@ -279,10 +218,11 @@ fn print_diagnostic_json(
fn print_diagnostic(
diag: &diagnostics::Diagnostic,
line_index: &LineIndex,
url: &lsp_types::Url,
err_in_diag: &mut bool,
cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
let diag = convert::ide_to_lsp_diagnostic(line_index, diag, |_file_id| None);
let diag = convert::ide_to_lsp_diagnostic(line_index, url, diag);
let severity = match diag.severity {
None => DiagnosticSeverity::ERROR,
Some(sev) => {
@ -307,7 +247,7 @@ fn print_diagnostic(
fn maybe_code_as_string(mc: Option<NumberOrString>) -> String {
match mc {
Some(ns) => match ns {
NumberOrString::Number(n) => format!("{n}"),
NumberOrString::Number(n) => format!("{}", n),
NumberOrString::String(s) => s,
},
None => "".to_string(),
@ -325,6 +265,7 @@ fn do_parse_all_par(
let pb = cli.progress(module_iter.len() as u64, "Parsing modules");
let vfs = &loaded.vfs;
Ok(module_iter
.par_bridge()
.progress_with(pb)
@ -335,7 +276,7 @@ fn do_parse_all_par(
&& file_source == FileSource::Src
&& db.file_app_type(file_id).ok() != Some(Some(AppType::Dep))
{
do_parse_one(db, config, to, file_id, module_name.as_str()).unwrap()
do_parse_one(db, vfs, config, to, file_id, module_name.as_str()).unwrap()
} else {
None
}
@ -356,6 +297,7 @@ fn do_parse_all_seq(
let pb = cli.progress(module_iter.len() as u64, "Parsing modules (sequential)");
let vfs = &loaded.vfs;
let db = loaded.analysis();
Ok(module_iter
.progress_with(pb)
@ -364,7 +306,7 @@ fn do_parse_all_seq(
&& file_source == FileSource::Src
&& db.file_app_type(file_id).ok() != Some(Some(AppType::Dep))
{
do_parse_one(&db, config, to, file_id, module_name.as_str()).unwrap()
do_parse_one(&db, vfs, config, to, file_id, module_name.as_str()).unwrap()
} else {
None
}
@ -374,11 +316,13 @@ fn do_parse_all_seq(
fn do_parse_one(
db: &Analysis,
vfs: &Vfs,
config: &DiagnosticsConfig,
to: &Option<PathBuf>,
file_id: FileId,
name: &str,
) -> Result<Option<ParseResult>> {
let url = file_id_to_url(vfs, file_id);
let native = db.native_diagnostics(config, &vec![], file_id)?;
let erlang_service_diagnostics =
db.erlang_service_diagnostics(file_id, config, RemoveElpReported::Yes)?;
@ -392,18 +336,16 @@ fn do_parse_one(
.unwrap_or(LabeledDiagnostics::default());
if let Some(to) = to {
let to_path = to.join(format!("{name}.diag"));
let to_path = to.join(format!("{}.diag", name));
let mut output = File::create(to_path)?;
for diagnostic in native.iter() {
let diagnostic =
convert::ide_to_lsp_diagnostic(&line_index, diagnostic, |_file_id| None);
writeln!(output, "{diagnostic:?}")?;
let diagnostic = convert::ide_to_lsp_diagnostic(&line_index, &url, diagnostic);
writeln!(output, "{:?}", diagnostic)?;
}
for diagnostic in erlang_service.iter() {
let diagnostic =
convert::ide_to_lsp_diagnostic(&line_index, diagnostic, |_file_id| None);
writeln!(output, "{diagnostic:?}")?;
let diagnostic = convert::ide_to_lsp_diagnostic(&line_index, &url, diagnostic);
writeln!(output, "{:?}", diagnostic)?;
}
}
if !(native.is_empty() && erlang_service.is_empty()) {

View file

@ -10,7 +10,6 @@
use std::path::Path;
use std::sync::Arc;
use std::time::SystemTime;
use anyhow::Context;
use anyhow::Result;
@ -39,7 +38,6 @@ use elp_ide::elp_ide_db::elp_base_db::FileId;
use elp_ide::elp_ide_db::elp_base_db::IncludeOtp;
use elp_ide::elp_ide_db::elp_base_db::ModuleName;
use elp_ide::elp_ide_db::elp_base_db::VfsPath;
use elp_log::telemetry;
use elp_project_model::AppName;
use elp_project_model::DiscoverConfig;
use elp_project_model::ProjectBuildData;
@ -78,7 +76,6 @@ pub fn eqwalize_module(
cli: &mut dyn Cli,
query_config: &BuckQueryConfig,
) -> Result<()> {
let start_time = SystemTime::now();
let config = DiscoverConfig::new(args.rebar, &args.profile);
let mut loaded = load::load_project_at(
cli,
@ -89,10 +86,7 @@ pub fn eqwalize_module(
query_config,
)?;
build::compile_deps(&loaded, cli)?;
telemetry::report_elapsed_time("eqwalize operational", start_time);
let r = do_eqwalize_module(args, &mut loaded, cli);
telemetry::report_elapsed_time("eqwalize done", start_time);
r
do_eqwalize_module(args, &mut loaded, cli)
}
pub fn do_eqwalize_module(
@ -106,10 +100,11 @@ pub fn do_eqwalize_module(
for module in &args.modules {
let suggest_name = Path::new(module).file_stem().and_then(|name| name.to_str());
let context_str = match suggest_name {
Some(name) if name != module => {
format!("Module {module} not found. Did you mean elp eqwalize {name}?")
}
_ => format!("Module {module} not found"),
Some(name) if name != module => format!(
"Module {} not found. Did you mean elp eqwalize {}?",
module, name
),
_ => format!("Module {} not found", module),
};
let file_id = analysis
.module_file_id(loaded.project_id, module)?
@ -149,7 +144,6 @@ pub fn eqwalize_all(
cli: &mut dyn Cli,
query_config: &BuckQueryConfig,
) -> Result<()> {
let start_time = SystemTime::now();
// Hack to avoid hint appearing in tests
cli.spinner(SHELL_HINT).finish();
let config = DiscoverConfig::new(args.rebar, &args.profile);
@ -162,10 +156,7 @@ pub fn eqwalize_all(
query_config,
)?;
build::compile_deps(&loaded, cli)?;
telemetry::report_elapsed_time("eqwalize-all operational", start_time);
let r = do_eqwalize_all(args, &mut loaded, cli);
telemetry::report_elapsed_time("eqwalize-all done", start_time);
r
do_eqwalize_all(args, &mut loaded, cli)
}
pub fn do_eqwalize_all(
@ -178,7 +169,7 @@ pub fn do_eqwalize_all(
let module_index = analysis.module_index(loaded.project_id)?;
let include_generated = args.include_generated;
if include_generated {
write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?;
write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?;
}
let pb = cli.progress(module_index.len_own() as u64, "Gathering modules");
let file_ids: Vec<FileId> = module_index
@ -186,7 +177,10 @@ pub fn do_eqwalize_all(
.par_bridge()
.progress_with(pb.clone())
.map_with(analysis.clone(), |analysis, (name, _source, file_id)| {
if analysis.should_eqwalize(file_id).unwrap() && !otp_file_to_ignore(analysis, file_id)
if analysis
.should_eqwalize(file_id, args.include_tests)
.unwrap()
&& !otp_file_to_ignore(analysis, file_id)
{
if args.stats {
add_stat(name.to_string());
@ -233,7 +227,6 @@ pub fn eqwalize_app(
cli: &mut dyn Cli,
query_config: &BuckQueryConfig,
) -> Result<()> {
let start_time = SystemTime::now();
let config = DiscoverConfig::new(args.rebar, &args.profile);
let mut loaded = load::load_project_at(
cli,
@ -244,10 +237,7 @@ pub fn eqwalize_app(
query_config,
)?;
build::compile_deps(&loaded, cli)?;
telemetry::report_elapsed_time("eqwalize-app operational", start_time);
let r = do_eqwalize_app(args, &mut loaded, cli);
telemetry::report_elapsed_time("eqwalize-app done", start_time);
r
do_eqwalize_app(args, &mut loaded, cli)
}
pub fn do_eqwalize_app(
@ -260,13 +250,15 @@ pub fn do_eqwalize_app(
let module_index = analysis.module_index(loaded.project_id)?;
let include_generated = args.include_generated;
if include_generated {
write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?;
write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?;
}
let file_ids: Vec<FileId> = module_index
.iter_own()
.filter_map(|(_name, _source, file_id)| {
if analysis.file_app_name(file_id).ok()? == Some(AppName(args.app.clone()))
&& analysis.should_eqwalize(file_id).unwrap()
&& analysis
.should_eqwalize(file_id, args.include_tests)
.unwrap()
&& !otp_file_to_ignore(analysis, file_id)
{
Some(file_id)
@ -292,7 +284,6 @@ pub fn eqwalize_target(
cli: &mut dyn Cli,
query_config: &BuckQueryConfig,
) -> Result<()> {
let start_time = SystemTime::now();
let config = DiscoverConfig::buck();
let mut loaded = load::load_project_at(
cli,
@ -304,7 +295,6 @@ pub fn eqwalize_target(
)?;
set_eqwalizer_config(&mut loaded);
telemetry::report_elapsed_time("eqwalize-target operational", start_time);
let buck = match &loaded.project.project_build_data {
ProjectBuildData::Buck(buck) => buck,
@ -317,7 +307,7 @@ pub fn eqwalize_target(
let analysis = &loaded.analysis();
let include_generated = args.include_generated;
if include_generated {
write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?;
write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?;
}
let mut file_ids: Vec<FileId> = Default::default();
let mut at_least_one_found = false;
@ -334,7 +324,9 @@ pub fn eqwalize_target(
let vfs_path = VfsPath::from(src.clone());
if let Some((file_id, _)) = loaded.vfs.file_id(&vfs_path) {
at_least_one_found = true;
if analysis.should_eqwalize(file_id).unwrap()
if analysis
.should_eqwalize(file_id, args.include_tests)
.unwrap()
&& !otp_file_to_ignore(analysis, file_id)
{
file_ids.push(file_id);
@ -362,15 +354,13 @@ elp eqwalize-target erl/chatd #same as //erl/chatd/... but enables shell complet
let mut reporter = reporting::PrettyReporter::new(analysis, &loaded, cli);
let bail_on_error = args.bail_on_error;
let r = eqwalize(EqwalizerInternalArgs {
eqwalize(EqwalizerInternalArgs {
analysis,
loaded: &loaded,
file_ids,
reporter: &mut reporter,
bail_on_error,
});
telemetry::report_elapsed_time("eqwalize-target done", start_time);
r
})
}
pub fn eqwalize_stats(
@ -392,7 +382,7 @@ pub fn eqwalize_stats(
let module_index = analysis.module_index(loaded.project_id)?;
let include_generated = args.include_generated;
if include_generated {
write!(cli, "{DEPRECATED_INCLUDE_GENERATED}")?;
write!(cli, "{}", DEPRECATED_INCLUDE_GENERATED)?;
}
let project_id = loaded.project_id;
let pb = cli.progress(module_index.len_own() as u64, "Computing stats");
@ -401,7 +391,9 @@ pub fn eqwalize_stats(
.par_bridge()
.progress_with(pb.clone())
.map_with(analysis.clone(), |analysis, (name, _source, file_id)| {
if analysis.should_eqwalize(file_id).expect("cancelled")
if analysis
.should_eqwalize(file_id, args.include_tests)
.expect("cancelled")
&& !otp_file_to_ignore(analysis, file_id)
{
analysis
@ -454,7 +446,8 @@ fn print_diagnostic_json(
cli,
"{}",
serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| panic!(
"print_diagnostics_json failed for '{converted_diagnostic:?}': {err}"
"print_diagnostics_json failed for '{:?}': {}",
converted_diagnostic, err
))
)?;
Ok(())
@ -473,6 +466,8 @@ fn eqwalize(
bail!("No files to eqWAlize detected")
}
pre_parse_for_speed(reporter, analysis.clone(), &file_ids);
let files_count = file_ids.len();
let pb = reporter.progress(files_count as u64, "EqWAlizing");
let output = loaded.with_eqwalizer_progress_bar(pb.clone(), move |analysis| {
@ -509,7 +504,7 @@ fn eqwalize(
let file_id = analysis
.module_index(loaded.project_id)?
.file_for_module(module.as_str())
.with_context(|| format!("module {module} not found"))?;
.with_context(|| format!("module {} not found", module))?;
reporter.write_eqwalizer_diagnostics(file_id, &diagnostics)?;
if !diagnostics.is_empty() {
has_errors = true;
@ -564,7 +559,8 @@ fn eqwalize(
// The cached parse errors must be non-empty otherwise we wouldn't have `NoAst`
assert!(
!parse_diagnostics.is_empty(),
"Expecting erlang service diagnostics, but none found, for '{module}'"
"Expecting erlang service diagnostics, but none found, for '{}'",
module
);
let parse_diagnostics: Vec<_> = parse_diagnostics
.into_iter()
@ -591,6 +587,17 @@ fn eqwalize(
}
}
fn pre_parse_for_speed(reporter: &dyn Reporter, analysis: Analysis, file_ids: &[FileId]) {
let pb = reporter.progress(file_ids.len() as u64, "Parsing modules");
file_ids
.par_iter()
.progress_with(pb.clone())
.for_each_with(analysis, |analysis, &file_id| {
let _ = analysis.module_ast(file_id);
});
pb.finish();
}
fn set_eqwalizer_config(loaded: &mut LoadResult) {
let config = EqwalizerConfig::default();
let db = loaded.analysis_host.raw_database_mut();

View file

@ -11,7 +11,6 @@
use std::fs;
use std::path::Path;
use std::str;
use std::time::SystemTime;
use anyhow::Context;
use anyhow::Error;
@ -27,7 +26,6 @@ use elp_ide::Analysis;
use elp_ide::elp_ide_db::elp_base_db::FileId;
use elp_ide::elp_ide_db::elp_base_db::IncludeOtp;
use elp_ide::erlang_service::DiagnosticLocation;
use elp_log::telemetry;
use elp_log::timeit;
use elp_project_model::AppType;
use elp_project_model::DiscoverConfig;
@ -42,7 +40,6 @@ use crate::reporting::add_stat;
use crate::reporting::dump_stats;
pub fn parse_all(args: &ParseAll, cli: &mut dyn Cli, query_config: &BuckQueryConfig) -> Result<()> {
let start_time = SystemTime::now();
let config = DiscoverConfig::new(!args.buck, &args.profile);
let loaded = load::load_project_at(
cli,
@ -55,15 +52,10 @@ pub fn parse_all(args: &ParseAll, cli: &mut dyn Cli, query_config: &BuckQueryCon
build::compile_deps(&loaded, cli)?;
fs::create_dir_all(&args.to)?;
telemetry::report_elapsed_time("parse-all operational", start_time);
let parse_diagnostics = do_parse_all(cli, &loaded, &args.to, &args.module, args.buck)?;
if args.stats {
dump_stats(cli, args.list_modules);
}
telemetry::report_elapsed_time("parse-all done", start_time);
if !parse_diagnostics.is_empty() {
writeln!(
cli,
@ -137,7 +129,7 @@ pub fn do_parse_one(
let result = db.module_ast(file_id)?;
if result.is_ok() {
if let Some((name, to)) = to {
let to_path = to.join(format!("{name}.etf"));
let to_path = to.join(format!("{}.etf", name));
fs::write(to_path, &*result.ast)?;
}
Ok(vec![])
@ -150,15 +142,14 @@ pub fn do_parse_one(
.chain(result.warnings.iter())
.map(|err| {
let relative_path: &Path = err.path.strip_prefix(root_dir).unwrap_or(&err.path);
let (range, line_num) = match &err.location {
let (range, line_num) = match err.location {
None => (None, convert::position(&line_index, 0.into()).line + 1),
Some(DiagnosticLocation::Normal(range)) => (
Some(range),
convert::position(&line_index, range.start()).line + 1,
),
Some(DiagnosticLocation::Included {
file_attribute_location: directive_location,
error_path: _,
directive_location,
error_location: _,
}) => (
Some(directive_location),
@ -170,7 +161,7 @@ pub fn do_parse_one(
relative_path: relative_path.to_owned(),
line_num,
msg: err.msg.to_owned(),
range: range.copied(),
range,
}
})
.collect();

View file

@ -15,11 +15,11 @@ use elp_ide::diagnostics::DiagnosticCode;
use crate::args::Explain;
pub fn explain(args: &Explain, cli: &mut dyn Cli) -> Result<()> {
if let Some(code) = DiagnosticCode::maybe_from_string(&args.code)
&& let Some(uri) = DiagnosticCode::as_uri(&code)
{
let label = code.as_label();
return Ok(writeln!(cli, "{uri} ({label})")?);
if let Some(code) = DiagnosticCode::maybe_from_string(&args.code) {
if let Some(uri) = DiagnosticCode::as_uri(&code) {
let label = code.as_label();
return Ok(writeln!(cli, "{uri} ({label})")?);
}
}
Ok(writeln!(cli, "Unkwnown code: {}", args.code)?)
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -29,7 +29,6 @@ use elp::cli::Cli;
use elp::convert;
use elp::memory_usage::MemoryUsage;
use elp_ide::Analysis;
use elp_ide::AnalysisHost;
use elp_ide::TextRange;
use elp_ide::elp_ide_db::EqwalizerDiagnostic;
use elp_ide::elp_ide_db::elp_base_db::AbsPath;
@ -39,7 +38,6 @@ use indicatif::ProgressBar;
use itertools::Itertools;
use lazy_static::lazy_static;
use parking_lot::Mutex;
use vfs::Vfs;
pub trait Reporter {
fn write_eqwalizer_diagnostics(
@ -119,7 +117,7 @@ impl Reporter for PrettyReporter<'_> {
let range: Range<usize> =
diagnostic.range.start().into()..diagnostic.range.end().into();
let expr = match &diagnostic.expression {
Some(s) => format!("{s}.\n"),
Some(s) => format!("{}.\n", s),
None => "".to_string(),
};
@ -129,7 +127,7 @@ impl Reporter for PrettyReporter<'_> {
let mut labels = vec![msg_label];
if let Some(s) = &diagnostic.explanation {
let explanation_label =
Label::secondary(reporting_id, range).with_message(format!("\n\n{s}"));
Label::secondary(reporting_id, range).with_message(format!("\n\n{}", s));
labels.push(explanation_label);
};
let d: ReportingDiagnostic<usize> = ReportingDiagnostic::error()
@ -190,7 +188,7 @@ impl Reporter for PrettyReporter<'_> {
let duration = self.start.elapsed().as_secs();
self.cli.set_color(&YELLOW_COLOR_SPEC)?;
if count == total {
write!(self.cli, "eqWAlized {count} module(s) in {duration}s")?;
write!(self.cli, "eqWAlized {} module(s) in {}s", count, duration)?;
} else {
write!(
self.cli,
@ -227,6 +225,8 @@ impl Reporter for JsonReporter<'_> {
diagnostics: &[EqwalizerDiagnostic],
) -> Result<()> {
let line_index = self.analysis.line_index(file_id)?;
// Pass include_Tests = false so that errors for tests files that are not opted-in are tagged as arc_types::Severity::Disabled
let eqwalizer_enabled = self.analysis.is_eqwalizer_enabled(file_id, false).unwrap();
let file_path = &self.loaded.vfs.file_path(file_id);
let root_path = &self
.analysis
@ -235,10 +235,14 @@ impl Reporter for JsonReporter<'_> {
.root_dir;
let relative_path = get_relative_path(root_path, file_path);
for diagnostic in diagnostics {
let diagnostic =
convert::eqwalizer_to_arc_diagnostic(diagnostic, &line_index, relative_path);
let diagnostic = convert::eqwalizer_to_arc_diagnostic(
diagnostic,
&line_index,
relative_path,
eqwalizer_enabled,
);
let diagnostic = serde_json::to_string(&diagnostic)?;
writeln!(self.cli, "{diagnostic}")?;
writeln!(self.cli, "{}", diagnostic)?;
}
Ok(())
}
@ -254,10 +258,9 @@ impl Reporter for JsonReporter<'_> {
"ELP".to_string(),
diagnostic.msg.clone(),
None,
None,
);
let diagnostic = serde_json::to_string(&diagnostic)?;
writeln!(self.cli, "{diagnostic}")?;
writeln!(self.cli, "{}", diagnostic)?;
}
Ok(())
}
@ -278,10 +281,9 @@ impl Reporter for JsonReporter<'_> {
"ELP".to_string(),
description,
None,
None,
);
let diagnostic = serde_json::to_string(&diagnostic)?;
writeln!(self.cli, "{diagnostic}")?;
writeln!(self.cli, "{}", diagnostic)?;
Ok(())
}
@ -355,12 +357,12 @@ pub(crate) fn dump_stats(cli: &mut dyn Cli, list_modules: bool) {
if list_modules {
writeln!(cli, "--------------start of modules----------").ok();
stats.iter().sorted().for_each(|stat| {
writeln!(cli, "{stat}").ok();
writeln!(cli, "{}", stat).ok();
});
}
writeln!(cli, "{} modules processed", stats.len()).ok();
let mem_usage = MemoryUsage::now();
writeln!(cli, "{mem_usage}").ok();
writeln!(cli, "{}", mem_usage).ok();
}
lazy_static! {
@ -374,28 +376,3 @@ pub(crate) fn add_stat(stat: String) {
let mut stats = STATS.lock();
stats.push(stat);
}
pub(crate) fn print_memory_usage(
mut host: AnalysisHost,
vfs: Vfs,
cli: &mut dyn Cli,
) -> Result<()> {
let mem = host.per_query_memory_usage();
let before = profile::memory_usage();
drop(vfs);
let vfs = before.allocated - profile::memory_usage().allocated;
let before = profile::memory_usage();
drop(host);
let unaccounted = before.allocated - profile::memory_usage().allocated;
let remaining = profile::memory_usage().allocated;
for (name, bytes, entries) in mem {
writeln!(cli, "{bytes:>8} {entries:>6} {name}")?;
}
writeln!(cli, "{vfs:>8} VFS")?;
writeln!(cli, "{unaccounted:>8} Unaccounted")?;
writeln!(cli, "{remaining:>8} Remaining")?;
Ok(())
}

View file

@ -15,7 +15,6 @@ use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::sync::Arc;
use std::time::SystemTime;
use anyhow::Result;
use elp::build::load;
@ -30,7 +29,6 @@ use elp_ide::elp_ide_db::elp_base_db::SourceDatabaseExt;
use elp_ide::elp_ide_db::elp_base_db::SourceRoot;
use elp_ide::elp_ide_db::elp_base_db::SourceRootId;
use elp_ide::elp_ide_db::elp_base_db::VfsPath;
use elp_log::telemetry;
use elp_project_model::DiscoverConfig;
use elp_project_model::buck::BuckQueryConfig;
use paths::Utf8PathBuf;
@ -86,7 +84,7 @@ impl Watchman {
}
fn get_changes(&self, from: &WatchmanClock, patterns: Vec<&str>) -> Result<WatchmanChanges> {
let mut cmd = Self::cmd();
let mut cmd = Command::new("watchman");
cmd.arg("since");
cmd.arg(self.watch.as_os_str());
cmd.arg(&from.clock);
@ -105,14 +103,14 @@ enum ShellError {
impl fmt::Display for ShellError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ShellError::UnexpectedCommand(cmd) => write!(f, "Unexpected command {cmd}"),
ShellError::UnexpectedCommand(cmd) => write!(f, "Unexpected command {}", cmd),
ShellError::UnexpectedOption(cmd, arg) => {
write!(f, "Unexpected option {arg} for command {cmd}")
write!(f, "Unexpected option {} for command {}", arg, cmd)
}
ShellError::UnexpectedArg(cmd, arg) => {
write!(f, "Unexpected arg {arg} for command {cmd}")
write!(f, "Unexpected arg {} for command {}", arg, cmd)
}
ShellError::MissingArg(cmd) => write!(f, "Missing arg for command {cmd}"),
ShellError::MissingArg(cmd) => write!(f, "Missing arg for command {}", cmd),
}
}
}
@ -157,9 +155,10 @@ impl ShellCommand {
}
"eqwalize-app" => {
let include_generated = options.contains(&"--include-generated");
let include_tests = options.contains(&"--include-tests");
if let Some(other) = options
.into_iter()
.find(|&opt| opt != "--include-generated")
.find(|&opt| opt != "--include-generated" && opt != "--include-tests")
{
return Err(ShellError::UnexpectedOption(
"eqwalize-app".into(),
@ -176,6 +175,7 @@ impl ShellCommand {
rebar,
app: app.into(),
include_generated,
include_tests,
bail_on_error: false,
})));
}
@ -183,9 +183,10 @@ impl ShellCommand {
}
"eqwalize-all" => {
let include_generated = options.contains(&"--include-generated");
let include_tests = options.contains(&"--include-tests");
if let Some(other) = options
.into_iter()
.find(|&opt| opt != "--include-generated")
.find(|&opt| opt != "--include-generated" && opt != "--include-tests")
{
return Err(ShellError::UnexpectedOption(
"eqwalize-all".into(),
@ -201,6 +202,7 @@ impl ShellCommand {
rebar,
format: None,
include_generated,
include_tests,
bail_on_error: false,
stats: false,
list_modules: false,
@ -222,8 +224,10 @@ COMMANDS:
eqwalize <modules> Eqwalize specified modules
--clause-coverage Use experimental clause coverage checker
eqwalize-all Eqwalize all modules in the current project
--include-tests Also eqwalize test modules from project
--clause-coverage Use experimental clause coverage checker
eqwalize-app <app> Eqwalize all modules in specified application
--include-tests Also eqwalize test modules from project
--clause-coverage Use experimental clause coverage checker
";
@ -318,7 +322,7 @@ fn update_changes(
vfs.set_file_contents(vfs_path, None);
} else {
let contents =
fs::read(&path).unwrap_or_else(|_| panic!("Cannot read created file {path:?}"));
fs::read(&path).unwrap_or_else(|_| panic!("Cannot read created file {:?}", path));
vfs.set_file_contents(vfs_path, Some(contents));
}
});
@ -327,12 +331,6 @@ fn update_changes(
}
pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfig) -> Result<()> {
let start_time = SystemTime::now();
let mut cmd = Command::new("watchman");
let _ = cmd.arg("--version").output().map_err(|_| {
anyhow::Error::msg("`watchman` command not found. install it from https://facebook.github.io/watchman/ to use `elp shell`.")
})?;
let watchman = Watchman::new(&shell.project)
.map_err(|_err| anyhow::Error::msg(
"Could not find project. Are you in an Erlang project directory, or is one specified using --project?"
@ -346,10 +344,9 @@ pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfi
Mode::Shell,
query_config,
)?;
telemetry::report_elapsed_time("shell operational", start_time);
let mut rl = rustyline::DefaultEditor::new()?;
let mut last_read = watchman.get_clock()?;
write!(cli, "{WELCOME}")?;
write!(cli, "{}", WELCOME)?;
loop {
let readline = rl.readline("> ");
match readline {
@ -371,21 +368,21 @@ pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfi
last_read = update_changes(&mut loaded, &watchman, &last_read)?;
match ShellCommand::parse(shell, line) {
Ok(None) => (),
Ok(Some(ShellCommand::Help)) => write!(cli, "{HELP}")?,
Ok(Some(ShellCommand::Help)) => write!(cli, "{}", HELP)?,
Ok(Some(ShellCommand::Quit)) => break,
Ok(Some(ShellCommand::ShellEqwalize(eqwalize))) => {
eqwalizer_cli::do_eqwalize_module(&eqwalize, &mut loaded, cli)
.or_else(|e| writeln!(cli, "Error: {e}"))?;
.or_else(|e| writeln!(cli, "Error: {}", e))?;
}
Ok(Some(ShellCommand::ShellEqwalizeApp(eqwalize_app))) => {
eqwalizer_cli::do_eqwalize_app(&eqwalize_app, &mut loaded, cli)
.or_else(|e| writeln!(cli, "Error: {e}"))?;
.or_else(|e| writeln!(cli, "Error: {}", e))?;
}
Ok(Some(ShellCommand::ShellEqwalizeAll(eqwalize_all))) => {
eqwalizer_cli::do_eqwalize_all(&eqwalize_all, &mut loaded, cli)
.or_else(|e| writeln!(cli, "Error: {e}"))?;
.or_else(|e| writeln!(cli, "Error: {}", e))?;
}
Err(err) => write!(cli, "{err}\n{HELP}")?,
Err(err) => write!(cli, "{}\n{}", err, HELP)?,
}
}
Err(ReadlineError::Interrupted) => {
@ -396,11 +393,10 @@ pub fn run_shell(shell: &Shell, cli: &mut dyn Cli, query_config: &BuckQueryConfi
break;
}
Err(err) => {
writeln!(cli, "Error: {err:?}")?;
writeln!(cli, "Error: {:?}", err)?;
break;
}
}
}
telemetry::report_elapsed_time("shell done", start_time);
Ok(())
}

View file

@ -1,717 +0,0 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is dual-licensed under either the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree or the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree. You may select, at your option, one of the
* above-listed licenses.
*/
use std::fs;
use std::path::Path;
use std::str;
use std::thread;
use std::time::SystemTime;
use anyhow::Result;
use anyhow::bail;
use crossbeam_channel::unbounded;
use elp::build::load;
use elp::build::types::LoadResult;
use elp::cli::Cli;
use elp::convert;
use elp::memory_usage::MemoryUsage;
use elp::otp_file_to_ignore;
use elp_eqwalizer::Mode;
use elp_ide::Analysis;
use elp_ide::AnalysisHost;
use elp_ide::diagnostics;
use elp_ide::diagnostics::DiagnosticsConfig;
use elp_ide::diagnostics::FallBackToAll;
use elp_ide::diagnostics::LintConfig;
use elp_ide::diagnostics::LintsFromConfig;
use elp_ide::diagnostics::MatchSsr;
use elp_ide::elp_ide_db::LineCol;
use elp_ide::elp_ide_db::elp_base_db::AbsPath;
use elp_ide::elp_ide_db::elp_base_db::FileId;
use elp_ide::elp_ide_db::elp_base_db::IncludeOtp;
use elp_ide::elp_ide_db::elp_base_db::ModuleName;
use elp_ide::elp_ide_db::elp_base_db::ProjectId;
use elp_ide::elp_ide_db::elp_base_db::VfsPath;
use elp_log::telemetry;
use elp_project_model::AppName;
use elp_project_model::AppType;
use elp_project_model::DiscoverConfig;
use elp_project_model::buck::BuckQueryConfig;
use hir::Semantic;
use paths::Utf8PathBuf;
use rayon::prelude::ParallelBridge;
use rayon::prelude::ParallelIterator;
use crate::args::Ssr;
use crate::reporting;
use crate::reporting::print_memory_usage;
/// Normalise a user-supplied SSR pattern: a pattern already carrying the
/// `ssr:` prefix is used verbatim; anything else is wrapped as
/// `ssr: <pattern>.` (trailing dot included).
fn normalize_ssr_pattern(pattern: &str) -> String {
    match pattern.strip_prefix("ssr:") {
        Some(_) => pattern.to_string(),
        None => format!("ssr: {pattern}."),
    }
}
/// Entry point for the `elp ssr` command.
///
/// Validates every supplied SSR pattern up front, builds an ad-hoc lint
/// configuration from them, then loads the project and runs the search,
/// reporting matches to `cli`. With `--dump-config` it prints the
/// equivalent `.elp_lint.toml` snippet and exits without searching.
///
/// Returns an error if a pattern fails validation, or if project
/// loading / searching fails.
pub fn run_ssr_command(
    args: &Ssr,
    cli: &mut dyn Cli,
    query_config: &BuckQueryConfig,
    use_color: bool,
) -> Result<()> {
    let start_time = SystemTime::now();
    let memory_start = MemoryUsage::now();

    // Validate all SSR patterns early — before the (expensive) project
    // load — using a throwaway analysis host.
    let analysis_host = AnalysisHost::default();
    let analysis = analysis_host.analysis();
    for pattern in &args.ssr_specs {
        let normalized_pattern = normalize_ssr_pattern(pattern);
        match analysis.validate_ssr_pattern(&normalized_pattern) {
            Ok(Ok(())) => {}
            Ok(Err(e)) => bail!("invalid SSR pattern '{}': {}", pattern, e),
            Err(_cancelled) => bail!("SSR pattern validation was cancelled"),
        }
    }

    // Parse the strategy from CLI arguments
    let strategy = args.parse_strategy()?;

    // Create the lint config with all SSR patterns
    let mut lint_config = LintConfig::default();
    for pattern in &args.ssr_specs {
        let normalized_pattern = normalize_ssr_pattern(pattern);
        let severity = if args.dump_config {
            // Set the severity so that squiggles are shown in the VS Code UI
            Some(diagnostics::Severity::Information)
        } else {
            None
        };
        let ssr_lint = diagnostics::Lint::LintMatchSsr(MatchSsr {
            ssr_pattern: normalized_pattern,
            message: None,
            strategy: Some(strategy),
            severity,
        });
        lint_config.ad_hoc_lints.lints.push(ssr_lint);
    }

    // Build the diagnostics config
    let diagnostics_config = DiagnosticsConfig::default()
        .configure_diagnostics(
            &lint_config,
            &Some("ad-hoc: ssr-match".to_string()),
            &None,
            FallBackToAll::Yes,
        )?
        .set_include_generated(args.include_generated)
        .set_experimental(false)
        .set_use_cli_severity(false);

    if args.dump_config {
        // `--dump-config`: emit the config snippet instead of searching.
        let result = toml::to_string::<LintsFromConfig>(&diagnostics_config.lints_from_config)?;
        // This is a subsection of .elp_lint.toml, add subsection prefix
        let result = result.replace("[[lints]]", "[[ad_hoc_lints.lints]]");
        writeln!(cli, "\n# Add this to your .elp_lint.toml")?;
        writeln!(cli, "{}", result)?;
        return Ok(());
    }

    // Load the project
    let mut loaded = load_project(args, cli, query_config)?;
    telemetry::report_elapsed_time("ssr operational", start_time);
    let r = run_ssr(cli, &mut loaded, &diagnostics_config, args, use_color);
    telemetry::report_elapsed_time("ssr done", start_time);
    let memory_end = MemoryUsage::now();
    let memory_used = memory_end - memory_start;

    // Print memory usage at the end if requested and format is normal
    if args.is_format_normal() && args.report_system_stats {
        print_memory_usage(loaded.analysis_host, loaded.vfs, cli)?;
        writeln!(cli, "{}", memory_used)?;
    }
    r
}
/// Run the SSR search over the loaded project and report matches.
///
/// Scope selection: `--module` limits the search to one module and
/// `--file` to one file; with neither, every module in the project is
/// searched in parallel, streaming results as they are found.
/// Panics on an unknown module, or (with `--app`) on a module outside
/// the requested application.
pub fn run_ssr(
    cli: &mut dyn Cli,
    loaded: &mut LoadResult,
    diagnostics_config: &DiagnosticsConfig,
    args: &Ssr,
    use_color: bool,
) -> Result<()> {
    let analysis = loaded.analysis();
    // Resolve the requested scope to an optional (file_id, module name) pair.
    let (file_id, name) = match &args.module {
        Some(module) => match analysis.module_file_id(loaded.project_id, module)? {
            Some(file_id) => {
                if args.is_format_normal() {
                    writeln!(cli, "module specified: {module}")?;
                }
                (Some(file_id), analysis.module_name(file_id)?)
            }
            None => panic!("Module not found: {module}"),
        },
        None => match &args.file {
            Some(file_name) => {
                if args.is_format_normal() {
                    writeln!(cli, "file specified: {file_name}")?;
                }
                // Canonicalise the path so it can be looked up in the VFS.
                let path_buf = Utf8PathBuf::from_path_buf(fs::canonicalize(file_name).unwrap())
                    .expect("UTF8 conversion failed");
                let path = AbsPath::assert(&path_buf);
                let path = path.as_os_str().to_str().unwrap();
                (
                    loaded
                        .vfs
                        .file_id(&VfsPath::new_real_path(path.to_string()))
                        .map(|(id, _)| id),
                    path_buf.as_path().file_name().map(ModuleName::new),
                )
            }
            None => (None, None),
        },
    };
    let mut match_count = 0;
    match (file_id, name) {
        (None, _) => {
            // Streaming case: process all modules
            let project_id = loaded.project_id;
            do_parse_all_streaming(
                cli,
                &analysis,
                &project_id,
                diagnostics_config,
                args,
                use_color,
                loaded,
                &mut match_count,
            )?;
        }
        (Some(file_id), Some(name)) => {
            // Single-module case; enforce `--app` membership when given.
            if let Some(app) = &args.app
                && let Ok(Some(file_app)) = analysis.file_app_name(file_id)
                && file_app != AppName(app.to_string())
            {
                panic!("Module {} does not belong to app {}", name.as_str(), app)
            }
            if let Some(diag) = do_parse_one(&analysis, diagnostics_config, file_id, &name, args)? {
                match_count = 1;
                print_single_result(cli, loaded, &diag, args, use_color)?;
            }
        }
        (Some(file_id), _) => {
            panic!("Could not get name from file_id for {file_id:?}")
        }
    };
    // Summary footer (normal format only).
    if match_count == 0 {
        if args.is_format_normal() {
            writeln!(cli, "No matches found")?;
        }
    } else if args.is_format_normal() {
        writeln!(cli, "\nMatches found in {} modules", match_count)?;
    }
    Ok(())
}
#[allow(clippy::too_many_arguments)]
/// Search every module of the project in parallel, streaming matches
/// over a channel so they are printed as soon as they are found.
///
/// A worker thread fans the modules out with rayon's `par_bridge`; each
/// module that matches sends its result through an unbounded channel,
/// and the caller's thread prints results as they arrive.
/// `match_count` is incremented once per module that produced matches.
fn do_parse_all_streaming(
    cli: &mut dyn Cli,
    analysis: &Analysis,
    project_id: &ProjectId,
    config: &DiagnosticsConfig,
    args: &Ssr,
    use_color: bool,
    loaded: &mut LoadResult,
    match_count: &mut usize,
) -> Result<()> {
    let module_index = analysis.module_index(*project_id).unwrap();
    let app_name = args.app.as_ref().map(|name| AppName(name.to_string()));

    // Create a channel for streaming results
    let (tx, rx) = unbounded();

    // Spawn a thread to process modules in parallel and send results
    let analysis_clone = analysis.clone();
    let config_clone = config.clone();
    let args_clone = args.clone();

    // Collect modules into an owned vector (the spawned thread must not
    // borrow `module_index`).
    let modules: Vec<_> = module_index
        .iter_own()
        .map(|(name, source, file_id)| (name.as_str().to_string(), source, file_id))
        .collect();

    thread::spawn(move || {
        modules
            .into_iter()
            .par_bridge()
            .map_with(
                (analysis_clone, tx),
                |(db, tx), (module_name, _file_source, file_id)| {
                    // Skip OTP files, dependency apps, and (with `--app`)
                    // modules outside the requested application.
                    if !otp_file_to_ignore(db, file_id)
                        && db.file_app_type(file_id).ok() != Some(Some(AppType::Dep))
                        && (app_name.is_none()
                            || db.file_app_name(file_id).ok().as_ref() == Some(&app_name))
                        && let Ok(Some(result)) =
                            do_parse_one(db, &config_clone, file_id, &module_name, &args_clone)
                    {
                        // Send result through channel
                        let _ = tx.send(result);
                    }
                },
            )
            .for_each(|_| {}); // Consume the iterator
        // Channel is dropped here, signaling end of results
    });

    // Process and print results as they arrive from the channel
    for result in rx {
        *match_count += 1;
        print_single_result(cli, loaded, &result, args, use_color)?;
    }
    Ok(())
}
/// Print one module's worth of SSR matches, in either JSON or
/// human-readable form depending on `args`.
///
/// In normal format, optionally prints the surrounding source lines
/// (grep-style -A/-B/-C context) with ANSI color highlighting or caret
/// markers, inserting a group separator between matches when context is
/// shown.
fn print_single_result(
    cli: &mut dyn Cli,
    loaded: &mut LoadResult,
    result: &(String, FileId, Vec<diagnostics::Diagnostic>),
    args: &Ssr,
    use_color: bool,
) -> Result<()> {
    let (name, file_id, diags) = result;
    if args.is_format_json() {
        // JSON: one arc-style diagnostic object per match, with the file
        // path made relative to the project root.
        for diag in diags {
            let vfs_path = loaded.vfs.file_path(*file_id);
            let analysis = loaded.analysis();
            let root_path = &analysis
                .project_data(*file_id)
                .unwrap_or_else(|_err| panic!("could not find project data"))
                .unwrap_or_else(|| panic!("could not find project data"))
                .root_dir;
            let relative_path = reporting::get_relative_path(root_path, vfs_path);
            print_diagnostic_json(diag, &analysis, *file_id, relative_path, false, cli)?;
        }
    } else {
        writeln!(cli, " {}: {}", name, diags.len())?;
        // Determine if we should show source context
        let show_source = args.show_source
            || args.before_context.is_some()
            || args.after_context.is_some()
            || args.context.is_some()
            || args.group_separator.is_some()
            || args.no_group_separator;
        let (before_lines, after_lines) = calculate_context_lines(args);
        let has_context = before_lines > 0 || after_lines > 0;
        let group_separator = should_show_group_separator(args, has_context && show_source);
        for (idx, diag) in diags.iter().enumerate() {
            // Print group separator before each match (except the first) if showing source with context
            if show_source
                && idx > 0
                && let Some(ref sep) = group_separator
            {
                writeln!(cli, "{}", sep)?;
            }
            // Get relative path for diagnostic output
            let vfs_path = loaded.vfs.file_path(*file_id);
            let analysis = loaded.analysis();
            let root_path = &analysis
                .project_data(*file_id)
                .unwrap_or_else(|_err| panic!("could not find project data"))
                .unwrap_or_else(|| panic!("could not find project data"))
                .root_dir;
            let relative_path = reporting::get_relative_path(root_path, vfs_path);
            // Only show path when showing source context
            let path_to_show = if show_source {
                Some(relative_path)
            } else {
                None
            };
            print_diagnostic(diag, &loaded.analysis(), *file_id, path_to_show, false, cli)?;
            // Only show source context if --show-source or --show-source-markers is set
            if show_source {
                if use_color {
                    print_source_with_context(
                        diag,
                        &loaded.analysis(),
                        *file_id,
                        before_lines,
                        after_lines,
                        true,
                        cli,
                    )?;
                } else {
                    print_source_with_context_markers(
                        diag,
                        &loaded.analysis(),
                        *file_id,
                        before_lines,
                        after_lines,
                        cli,
                    )?;
                }
                writeln!(cli)?;
            }
        }
    }
    Ok(())
}
/// Discover the build configuration (rebar profile vs buck) and load the
/// project rooted at `args.project`, including OTP sources, in server
/// mode.
fn load_project(
    args: &Ssr,
    cli: &mut dyn Cli,
    query_config: &BuckQueryConfig,
) -> Result<LoadResult> {
    log::info!("Loading project at: {:?}", args.project);
    load::load_project_at(
        cli,
        &args.project,
        DiscoverConfig::new(args.rebar, &args.profile),
        IncludeOtp::Yes,
        Mode::Server,
        query_config,
    )
}
/// Run the configured SSR lints over a single file.
///
/// Returns `Ok(None)` when the file is skipped (generated or test code
/// without the corresponding opt-in flag) or produced no matches;
/// otherwise returns the module name, file id, and its diagnostics.
fn do_parse_one(
    db: &Analysis,
    config: &DiagnosticsConfig,
    file_id: FileId,
    name: &str,
    args: &Ssr,
) -> Result<Option<(String, FileId, Vec<diagnostics::Diagnostic>)>> {
    // Honour the generated-file and test-file opt-outs; the queries are
    // only issued when the corresponding flag is absent.
    if !args.include_generated && db.is_generated(file_id)? {
        return Ok(None);
    }
    if !args.include_tests && db.is_test_suite_or_test_helper(file_id)?.unwrap_or(false) {
        return Ok(None);
    }
    // Run only the SSR lints configured in `lints_from_config`.
    let found = db.with_db(|database| {
        let sema = Semantic::new(database);
        let mut acc = Vec::new();
        config
            .lints_from_config
            .get_diagnostics(&mut acc, &sema, file_id);
        acc
    })?;
    if found.is_empty() {
        Ok(None)
    } else {
        Ok(Some((name.to_string(), file_id, found)))
    }
}
/// Render one diagnostic to `cli`, prefixed with `path:` when a path is
/// supplied and indented otherwise.
fn print_diagnostic(
    diag: &diagnostics::Diagnostic,
    analysis: &Analysis,
    file_id: FileId,
    path: Option<&Path>,
    use_cli_severity: bool,
    cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
    let index = analysis.line_index(file_id)?;
    let rendered = diag.print(&index, use_cli_severity);
    match path {
        Some(p) => writeln!(cli, "{}:{}", p.display(), rendered)?,
        None => writeln!(cli, " {}", rendered)?,
    }
    Ok(())
}
/// Convert one diagnostic to the arc wire format and emit it as a single
/// JSON object on its own line. Panics if serialization fails.
fn print_diagnostic_json(
    diagnostic: &diagnostics::Diagnostic,
    analysis: &Analysis,
    file_id: FileId,
    path: &Path,
    use_cli_severity: bool,
    cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
    let index = analysis.line_index(file_id)?;
    let converted_diagnostic =
        convert::ide_to_arc_diagnostic(&index, path, diagnostic, use_cli_severity);
    let json = serde_json::to_string(&converted_diagnostic).unwrap_or_else(|err| {
        panic!("print_diagnostics_json failed for '{converted_diagnostic:?}': {err}")
    });
    writeln!(cli, "{}", json)?;
    Ok(())
}
/// Print a line with color highlighting
///
/// Emits `line_num` in a gray gutter followed by the line; when the line
/// overlaps the match, the matched slice is rendered in bold red using
/// raw ANSI escape codes.
///
/// NOTE(review): match columns come from `LineCol::col_utf16` but are used
/// as byte offsets into `line_content`; the `.min(line_content.len())`
/// clamps avoid out-of-range slices, but on non-ASCII lines the boundaries
/// may be wrong (or panic on a char boundary) — confirm inputs.
fn print_line_with_color(
    line_num: usize,
    line_content: &str,
    is_match_line: bool,
    start: &LineCol,
    end: &LineCol,
    current_line: u32,
    cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
    // Line number in gray
    write!(cli, "\x1b[90m{:4} |\x1b[0m ", line_num)?;
    if !is_match_line {
        // Non-match line: print normally
        writeln!(cli, "{}", line_content)?;
    } else {
        // Match line: highlight the matched portion
        if current_line == start.line && current_line == end.line {
            // Single-line match
            let start_col = start.col_utf16 as usize;
            let end_col = end.col_utf16 as usize;
            let before = &line_content[..start_col.min(line_content.len())];
            let matched =
                &line_content[start_col.min(line_content.len())..end_col.min(line_content.len())];
            let after = &line_content[end_col.min(line_content.len())..];
            write!(cli, "{}", before)?;
            write!(cli, "\x1b[91;1m{}\x1b[0m", matched)?; // Red bold
            writeln!(cli, "{}", after)?;
        } else if current_line == start.line {
            // First line of multi-line match
            let start_col = start.col_utf16 as usize;
            let before = &line_content[..start_col.min(line_content.len())];
            let matched = &line_content[start_col.min(line_content.len())..];
            write!(cli, "{}", before)?;
            writeln!(cli, "\x1b[91;1m{}\x1b[0m", matched)?; // Red bold
        } else if current_line == end.line {
            // Last line of multi-line match
            let end_col = end.col_utf16 as usize;
            let matched = &line_content[..end_col.min(line_content.len())];
            let after = &line_content[end_col.min(line_content.len())..];
            write!(cli, "\x1b[91;1m{}\x1b[0m", matched)?; // Red bold
            writeln!(cli, "{}", after)?;
        } else {
            // Middle line of multi-line match
            writeln!(cli, "\x1b[91;1m{}\x1b[0m", line_content)?; // Red bold
        }
    }
    Ok(())
}
/// Calculate (before, after) context line counts from the grep-style
/// arguments. `-C/--context` wins and sets both; otherwise the
/// individual `-B`/`-A` values apply, defaulting to 0.
fn calculate_context_lines(args: &Ssr) -> (usize, usize) {
    match args.context {
        Some(both) => (both, both),
        None => (
            args.before_context.unwrap_or(0),
            args.after_context.unwrap_or(0),
        ),
    }
}
/// Determine whether a group separator line should be printed between
/// matches, and if so which one.
///
/// Returns `None` when suppressed via `--no-group-separator` or when no
/// context lines are being shown; otherwise the custom separator, or
/// grep's default `"--"`.
fn should_show_group_separator(args: &Ssr, has_context: bool) -> Option<String> {
    if args.no_group_separator || !has_context {
        None
    } else {
        Some(
            args.group_separator
                .clone()
                .unwrap_or_else(|| "--".to_string()),
        )
    }
}
/// Print source code context with the specified before/after context lines
///
/// Displays the lines from `before_lines` above the match through
/// `after_lines` below it, each with a line-number gutter; when
/// `use_color` is set, match lines are highlighted via
/// `print_line_with_color`.
fn print_source_with_context(
    diag: &diagnostics::Diagnostic,
    analysis: &Analysis,
    file_id: FileId,
    before_lines: usize,
    after_lines: usize,
    use_color: bool,
    cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
    let line_index = analysis.line_index(file_id)?;
    let source = &analysis.file_text(file_id)?;
    let range = diag.range;
    let start = line_index.line_col(range.start());
    let end = line_index.line_col(range.end());
    let lines: Vec<&str> = source.lines().collect();
    let total_lines = lines.len();

    // Calculate the range of lines to display (0-based, clamped to file).
    let first_line = start.line.saturating_sub(before_lines as u32) as usize;
    let last_line = ((end.line + after_lines as u32 + 1) as usize).min(total_lines);

    // Display the source context
    for line_idx in first_line..last_line {
        let line_num = line_idx + 1;
        let line_content = lines.get(line_idx).unwrap_or(&"");
        // Check if this line contains part of the match
        let is_match_line = line_idx >= start.line as usize && line_idx <= end.line as usize;
        if use_color {
            print_line_with_color(
                line_num,
                line_content,
                is_match_line,
                &start,
                &end,
                line_idx as u32,
                cli,
            )?;
        } else {
            // Just print the line without any highlighting
            write!(cli, "{:4} | ", line_num)?;
            writeln!(cli, "{}", line_content)?;
        }
    }
    Ok(())
}
/// Print source code context with text markers
///
/// Non-color variant of `print_source_with_context`: displays the same
/// window of lines, underlining matched columns with `^` carets via
/// `print_line_with_markers`.
fn print_source_with_context_markers(
    diag: &diagnostics::Diagnostic,
    analysis: &Analysis,
    file_id: FileId,
    before_lines: usize,
    after_lines: usize,
    cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
    let line_index = analysis.line_index(file_id)?;
    let source = &analysis.file_text(file_id)?;
    let range = diag.range;
    let start = line_index.line_col(range.start());
    let end = line_index.line_col(range.end());
    let lines: Vec<&str> = source.lines().collect();
    let total_lines = lines.len();

    // Calculate the range of lines to display (0-based, clamped to file).
    let first_line = start.line.saturating_sub(before_lines as u32) as usize;
    let last_line = ((end.line + after_lines as u32 + 1) as usize).min(total_lines);

    // Display the source context
    for line_idx in first_line..last_line {
        let line_num = line_idx + 1;
        let line_content = lines.get(line_idx).unwrap_or(&"");
        // Check if this line contains part of the match
        let is_match_line = line_idx >= start.line as usize && line_idx <= end.line as usize;
        print_line_with_markers(
            line_num,
            line_content,
            is_match_line,
            &start,
            &end,
            line_idx as u32,
            cli,
        )?;
    }
    Ok(())
}
/// Print a line with text markers (like diagnostic carets)
///
/// Emits the line prefixed by a `NNNN | ` gutter and, when the line overlaps
/// the diagnostic span, a second row underneath carrying `^` carets beneath
/// the matched columns.
///
/// * `line_num` - 1-based line number shown in the gutter
/// * `line_content` - raw text of the line, without a trailing newline
/// * `is_match_line` - whether this line overlaps the diagnostic range
/// * `start` / `end` - diagnostic start/end positions (line + UTF-16 column)
/// * `current_line` - 0-based index of this line, compared to `start.line`/`end.line`
fn print_line_with_markers(
    line_num: usize,
    line_content: &str,
    is_match_line: bool,
    start: &LineCol,
    end: &LineCol,
    current_line: u32,
    cli: &mut dyn Cli,
) -> Result<(), anyhow::Error> {
    // Content gutter: `{:4} | ` occupies 7 columns.
    write!(cli, "{:4} | ", line_num)?;
    writeln!(cli, "{}", line_content)?;
    if is_match_line {
        // Marker-row gutter: also 7 columns so the carets line up with the
        // content printed above (the previous 3-column prefix left the carets
        // shifted 4 columns to the left of the text they point at).
        write!(cli, "     | ")?;
        // Caret runs are counted in chars so multi-byte UTF-8 characters do
        // not inflate the run (`len()` counts bytes).
        // NOTE(review): `col_utf16` is in UTF-16 code units while the widths
        // here are chars; these agree for BMP text — confirm for
        // supplementary-plane content.
        let visible_len = line_content.chars().count();
        if current_line == start.line && current_line == end.line {
            // Single-line match: carets cover [start_col, end_col), at least one.
            let start_col = start.col_utf16 as usize;
            let end_col = end.col_utf16 as usize;
            let marker_len = end_col.saturating_sub(start_col).max(1);
            for _ in 0..start_col {
                write!(cli, " ")?;
            }
            for _ in 0..marker_len {
                write!(cli, "^")?;
            }
            writeln!(cli)?;
        } else if current_line == start.line {
            // First line of a multi-line match: carets from start_col to end of line.
            let start_col = start.col_utf16 as usize;
            let marker_len = visible_len.saturating_sub(start_col).max(1);
            for _ in 0..start_col {
                write!(cli, " ")?;
            }
            for _ in 0..marker_len {
                write!(cli, "^")?;
            }
            writeln!(cli)?;
        } else if current_line == end.line {
            // Last line of a multi-line match: carets from column 0 up to end_col.
            let end_col = end.col_utf16 as usize;
            for _ in 0..end_col {
                write!(cli, "^")?;
            }
            writeln!(cli)?;
        } else {
            // Middle line of a multi-line match: underline the whole line.
            for _ in 0..visible_len {
                write!(cli, "^")?;
            }
            writeln!(cli)?;
        }
    }
    Ok(())
}

View file

@ -78,9 +78,14 @@ pub fn load_project_at(
bail!("no projects")
};
log::info!("Discovered project: {manifest:?}");
log::info!("Discovered project: {:?}", manifest);
let pb = cli.spinner("Loading build info");
let project = Project::load(&manifest, &elp_config, query_config, &|_progress| {})?;
let project = Project::load(
&manifest,
elp_config.eqwalizer.clone(),
query_config,
&|_progress| {},
)?;
pb.finish();
load_project(cli, project, include_otp, eqwalizer_mode)
@ -192,13 +197,13 @@ fn load_database(
let changes = vfs.take_changes();
for (_file_id, file) in changes {
if file.exists()
&& let vfs::Change::Create(v, _) | vfs::Change::Modify(v, _) = file.change
{
let document = Document::from_bytes(&v);
let (text, line_ending) = document.vfs_to_salsa();
db.set_file_text(file.file_id, Arc::from(text));
line_ending_map.insert(file.file_id, line_ending);
if file.exists() {
if let vfs::Change::Create(v, _) | vfs::Change::Modify(v, _) = file.change {
let document = Document::from_bytes(&v);
let (text, line_ending) = document.vfs_to_salsa();
db.set_file_text(file.file_id, Arc::from(text));
line_ending_map.insert(file.file_id, line_ending);
}
}
}

View file

@ -30,13 +30,18 @@ pub trait Cli: Write + WriteColor {
fn err(&mut self) -> &mut dyn Write;
}
pub struct StandardCli(StandardStream, Stderr);
pub struct Real(StandardStream, Stderr);
impl StandardCli {
fn new(color_choice: ColorChoice) -> Self {
Self(StandardStream::stdout(color_choice), std::io::stderr())
impl Default for Real {
fn default() -> Self {
Self(
StandardStream::stdout(ColorChoice::Always),
std::io::stderr(),
)
}
}
impl Real {
fn progress_with_style(
&self,
len: u64,
@ -54,7 +59,7 @@ impl StandardCli {
}
}
impl Cli for StandardCli {
impl Cli for Real {
fn progress(&self, len: u64, prefix: &'static str) -> ProgressBar {
self.progress_with_style(len, prefix, " {prefix:25!} {bar} {pos}/{len} {wide_msg}")
}
@ -79,63 +84,6 @@ impl Cli for StandardCli {
}
}
impl Write for StandardCli {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> std::io::Result<()> {
self.0.flush()
}
}
impl WriteColor for StandardCli {
fn supports_color(&self) -> bool {
self.0.supports_color()
}
fn set_color(&mut self, spec: &ColorSpec) -> std::io::Result<()> {
self.0.set_color(spec)
}
fn reset(&mut self) -> std::io::Result<()> {
self.0.reset()
}
}
pub struct Real(StandardCli);
pub struct NoColor(StandardCli);
impl Default for Real {
fn default() -> Self {
Real(StandardCli::new(ColorChoice::Always))
}
}
impl Default for NoColor {
fn default() -> Self {
NoColor(StandardCli::new(ColorChoice::Never))
}
}
impl Cli for Real {
fn progress(&self, len: u64, prefix: &'static str) -> ProgressBar {
self.0.progress(len, prefix)
}
fn simple_progress(&self, len: u64, prefix: &'static str) -> ProgressBar {
self.0.simple_progress(len, prefix)
}
fn spinner(&self, prefix: &'static str) -> ProgressBar {
self.0.spinner(prefix)
}
fn err(&mut self) -> &mut dyn Write {
self.0.err()
}
}
impl Write for Real {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.0.write(buf)
@ -160,48 +108,6 @@ impl WriteColor for Real {
}
}
impl Cli for NoColor {
fn progress(&self, len: u64, prefix: &'static str) -> ProgressBar {
self.0.progress(len, prefix)
}
fn simple_progress(&self, len: u64, prefix: &'static str) -> ProgressBar {
self.0.simple_progress(len, prefix)
}
fn spinner(&self, prefix: &'static str) -> ProgressBar {
self.0.spinner(prefix)
}
fn err(&mut self) -> &mut dyn Write {
self.0.err()
}
}
impl Write for NoColor {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> std::io::Result<()> {
self.0.flush()
}
}
impl WriteColor for NoColor {
fn supports_color(&self) -> bool {
self.0.supports_color()
}
fn set_color(&mut self, spec: &ColorSpec) -> std::io::Result<()> {
self.0.set_color(spec)
}
fn reset(&mut self) -> std::io::Result<()> {
self.0.reset()
}
}
pub struct Fake(Buffer, Vec<u8>);
impl Default for Fake {

View file

@ -30,7 +30,7 @@ use serde::de::DeserializeOwned;
use serde_json::json;
use crate::from_json;
// @fb-only: use crate::meta_only;
// @fb-only
// Defines the server-side configuration of ELP. We generate *parts*
// of VS Code's `package.json` config from this.
@ -42,8 +42,6 @@ use crate::from_json;
// `new_name | `old_name` so that we keep parsing the old name.
config_data! {
struct ConfigData {
/// Whether to use the expermintal `buck2 targets` quick start process.
buck_quickStart: bool = json! { false },
/// Whether to show experimental ELP diagnostics that might
/// have more false positives than usual.
diagnostics_enableExperimental: bool = json! { false },
@ -91,12 +89,6 @@ config_data! {
/// Whether to show the `Link` lenses. Only applies when
/// `#elp.lens.enable#` is set.
lens_links_enable: bool = json! { false },
/// Whether to enable LogView lens links.
lens_logview_links: bool = json! { false },
/// Whether to enable Scuba lens links.
lens_scuba_links: bool = json! { false },
/// Whether to enable WAM lens links.
lens_wam_links: bool = json! { false },
/// Configure LSP-based logging using env_logger syntax.
log: String = json! { "error" },
/// Whether to show Signature Help.
@ -136,9 +128,6 @@ pub struct LensConfig {
pub buck2_mode: Option<String>,
pub debug: bool,
pub links: bool,
pub logview_links: bool,
pub scuba_links: bool,
pub wam_links: bool,
}
#[derive(Clone, Debug, PartialEq, Eq)]
@ -154,12 +143,12 @@ pub struct EqwalizerConfig {
}
macro_rules! try_ {
($expr:expr) => {
($expr:expr_2021) => {
|| -> _ { Some($expr) }()
};
}
macro_rules! try_or {
($expr:expr, $or:expr) => {
($expr:expr_2021, $or:expr_2021) => {
try_!($expr).unwrap_or($or)
};
}
@ -175,22 +164,22 @@ impl Config {
}
pub fn update(&mut self, json: serde_json::Value) {
log::info!("updating config from JSON: {json:#}");
log::info!("updating config from JSON: {:#}", json);
if json.is_null() || json.as_object().is_some_and(|it| it.is_empty()) {
return;
}
self.data = ConfigData::from_json(json);
// @fb-only: meta_only::harmonise_gks(self);
// @fb-only
}
pub fn update_gks(&mut self, json: serde_json::Value) {
log::info!("updating gks from JSON: {json:#}");
log::info!("updating gks from JSON: {:#}", json);
if json.is_null() || json.as_object().is_some_and(|it| it.is_empty()) {
return;
}
match from_json::<GKs>("GKs", json) {
Ok(val) => self.gks = val,
Err(err) => log::warn!("could not update GKs from JSON: {err:#}"),
Err(err) => log::warn!("could not update GKs from JSON: {:#}", err),
}
}
@ -334,9 +323,6 @@ impl Config {
&& self.data.lens_run_coverage_enable,
debug: self.data.lens_enable && self.data.lens_debug_enable,
links: self.data.lens_enable && self.data.lens_links_enable,
logview_links: self.data.lens_enable && self.data.lens_logview_links,
scuba_links: self.data.lens_enable && self.data.lens_scuba_links,
wam_links: self.data.lens_enable && self.data.lens_wam_links,
}
}
@ -381,38 +367,14 @@ impl Config {
try_or!(self.caps.window.as_ref()?.work_done_progress?, false)
}
pub fn set_buck_quick_start(&mut self, value: bool) {
self.data.buck_quickStart = value;
}
pub fn buck_quick_start(&self) -> bool {
self.data.buck_quickStart
}
pub fn buck_query(&self) -> BuckQueryConfig {
if self.buck_quick_start() {
BuckQueryConfig::BuckTargetsOnly
} else {
BuckQueryConfig::BuildGeneratedCode
}
BuckQueryConfig::BuildGeneratedCode
}
pub fn set_eqwalizer_all(&mut self, value: bool) {
self.data.eqwalizer_all = value;
}
pub fn set_lens_logview_links(&mut self, value: bool) {
self.data.lens_logview_links = value;
}
pub fn set_lens_scuba_links(&mut self, value: bool) {
self.data.lens_scuba_links = value;
}
pub fn set_lens_wam_links(&mut self, value: bool) {
self.data.lens_wam_links = value;
}
pub fn inlay_hints(&self) -> InlayHintsConfig {
InlayHintsConfig {
parameter_hints: self.data.inlayHints_parameterHints_enable,
@ -439,7 +401,7 @@ macro_rules! _config_data {
(struct $name:ident {
$(
$(#[doc=$doc:literal])*
$field:ident $(| $alias:ident)*: $ty:ty = $default:expr,
$field:ident $(| $alias:ident)*: $ty:ty = $default:expr_2021,
)*
}) => {
#[allow(non_snake_case)]
@ -503,7 +465,7 @@ fn schema(
fn key(f: &str) -> &str {
f.split_once('_').map_or(f, |x| x.0)
}
assert!(key(f1) <= key(f2), "wrong field order: {f1:?} {f2:?}");
assert!(key(f1) <= key(f2), "wrong field order: {:?} {:?}", f1, f2);
}
let map = fields
@ -528,7 +490,9 @@ fn field_props(
let doc = doc.trim_end_matches('\n');
assert!(
doc.ends_with('.') && doc.starts_with(char::is_uppercase),
"bad docs for {field}: {doc:?}"
"bad docs for {}: {:?}",
field,
doc
);
let mut map = serde_json::Map::default();
@ -577,7 +541,7 @@ fn field_props(
"type": ["null", "array"],
"items": { "type": "string" },
},
_ => panic!("{ty}: {default}"),
_ => panic!("{}: {}", ty, default),
}
map.into()
@ -587,14 +551,14 @@ fn doc_comment_to_string(doc: &[&str]) -> String {
doc.iter()
.map(|it| it.strip_prefix(' ').unwrap_or(it))
.fold(String::new(), |mut output, it| {
let _ = writeln!(output, "{it}");
let _ = writeln!(output, "{}", it);
output
})
}
pub fn config_schema_json() -> String {
let s = Config::json_schema();
let schema = format!("{s:#}");
let schema = format!("{:#}", s);
let mut schema = schema
.trim_start_matches('{')
.trim_end_matches('}')
@ -618,15 +582,10 @@ mod tests {
let s = remove_ws(&schema);
expect![[r#""elp.buck.quickStart":{"default":false,"markdownDescription":"Whethertousetheexpermintal`buck2targets`quickstartprocess.","type":"boolean"},"elp.diagnostics.disabled":{"default":[],"items":{"type":"string"},"markdownDescription":"ListofELPdiagnosticstodisable.","type":"array","uniqueItems":true},"elp.diagnostics.enableExperimental":{"default":false,"markdownDescription":"WhethertoshowexperimentalELPdiagnosticsthatmight\nhavemorefalsepositivesthanusual.","type":"boolean"},"elp.diagnostics.enableOtp":{"default":false,"markdownDescription":"WhethertoreportdiagnosticsforOTPfiles.","type":"boolean"},"elp.diagnostics.onSave.enable":{"default":false,"markdownDescription":"Updatenativediagnosticsonlywhenthefileissaved.","type":"boolean"},"elp.edoc.enable":{"default":false,"markdownDescription":"WhethertoreportEDocdiagnostics.","type":"boolean"},"elp.eqwalizer.all":{"default":false,"markdownDescription":"WhethertoreportEqwalizerdiagnosticsforthewholeprojectandnotonlyforopenedfiles.","type":"boolean"},"elp.eqwalizer.chunkSize":{"default":100,"markdownDescription":"Chunksizetouseforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.eqwalizer.maxTasks":{"default":32,"markdownDescription":"Maximumnumberoftaskstoruninparallelforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.highlightDynamic.enable":{"default":false,"markdownDescription":"Ifenabled,highlightvariableswithtype`dynamic()`whenEqwalizerresultsareavailable.","type":"boolean"},"elp.hoverActions.docLinks.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActionsoftype`docs`.Onlyapplieswhen\n`#elp.hoverActions.enable#`isset.","type":"boolean"},"elp.hoverActions.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActions.","type":"boolean"},"elp.inlayHints.parameterHints.enable":{"default":true,"markdownDescription":"Whethertoshowfunctionparameternameinlayhintsatthecall\nsite.","type":"boolean"},"elp.lens.buck2.mode":{"default":null,"markdownDescription":"Th
ebuck2modetouseforrunningtestsviathecodelenses.","type":["null","string"]},"elp.lens.debug.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Debug`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.enable":{"default":false,"markdownDescription":"WhethertoshowCodeLensesinErlangfiles.","type":"boolean"},"elp.lens.links.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Link`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.logview.links":{"default":false,"markdownDescription":"WhethertoenableLogViewlenslinks.","type":"boolean"},"elp.lens.run.coverage.enable":{"default":true,"markdownDescription":"Displaycodecoverageinformationwhenrunningtestsviathe\nCodeLenses.Onlyapplieswhen`#elp.lens.enabled`and\n`#elp.lens.run.enable#`areset.","type":"boolean"},"elp.lens.run.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Run`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.run.interactive.enable":{"default":false,"markdownDescription":"Whethertoshowthe`RunInteractive`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.scuba.links":{"default":false,"markdownDescription":"WhethertoenableScubalenslinks.","type":"boolean"},"elp.lens.wam.links":{"default":false,"markdownDescription":"WhethertoenableWAMlenslinks.","type":"boolean"},"elp.log":{"default":"error","markdownDescription":"ConfigureLSP-basedloggingusingenv_loggersyntax.","type":"string"},"elp.signatureHelp.enable":{"default":true,"markdownDescription":"WhethertoshowSignatureHelp.","type":"boolean"},"elp.typesOnHover.enable":{"default":false,"markdownDescription":"Displaytypeswhenhoveringoverexpressions.","type":"boolean"},"#]]
expect![[r#""elp.diagnostics.disabled":{"default":[],"items":{"type":"string"},"markdownDescription":"ListofELPdiagnosticstodisable.","type":"array","uniqueItems":true},"elp.diagnostics.enableExperimental":{"default":false,"markdownDescription":"WhethertoshowexperimentalELPdiagnosticsthatmight\nhavemorefalsepositivesthanusual.","type":"boolean"},"elp.diagnostics.enableOtp":{"default":false,"markdownDescription":"WhethertoreportdiagnosticsforOTPfiles.","type":"boolean"},"elp.diagnostics.onSave.enable":{"default":false,"markdownDescription":"Updatenativediagnosticsonlywhenthefileissaved.","type":"boolean"},"elp.edoc.enable":{"default":false,"markdownDescription":"WhethertoreportEDocdiagnostics.","type":"boolean"},"elp.eqwalizer.all":{"default":false,"markdownDescription":"WhethertoreportEqwalizerdiagnosticsforthewholeprojectandnotonlyforopenedfiles.","type":"boolean"},"elp.eqwalizer.chunkSize":{"default":100,"markdownDescription":"Chunksizetouseforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.eqwalizer.maxTasks":{"default":32,"markdownDescription":"Maximumnumberoftaskstoruninparallelforproject-wideeqwalization.","minimum":0,"type":"integer"},"elp.highlightDynamic.enable":{"default":false,"markdownDescription":"Ifenabled,highlightvariableswithtype`dynamic()`whenEqwalizerresultsareavailable.","type":"boolean"},"elp.hoverActions.docLinks.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActionsoftype`docs`.Onlyapplieswhen\n`#elp.hoverActions.enable#`isset.","type":"boolean"},"elp.hoverActions.enable":{"default":false,"markdownDescription":"WhethertoshowHoverActions.","type":"boolean"},"elp.inlayHints.parameterHints.enable":{"default":true,"markdownDescription":"Whethertoshowfunctionparameternameinlayhintsatthecall\nsite.","type":"boolean"},"elp.lens.buck2.mode":{"default":null,"markdownDescription":"Thebuck2modetouseforrunningtestsviathecodelenses.","type":["null","string"]},"elp.lens.debug.enable":{"default":false,"markdownDescription":"W
hethertoshowthe`Debug`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.enable":{"default":false,"markdownDescription":"WhethertoshowCodeLensesinErlangfiles.","type":"boolean"},"elp.lens.links.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Link`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.run.coverage.enable":{"default":true,"markdownDescription":"Displaycodecoverageinformationwhenrunningtestsviathe\nCodeLenses.Onlyapplieswhen`#elp.lens.enabled`and\n`#elp.lens.run.enable#`areset.","type":"boolean"},"elp.lens.run.enable":{"default":false,"markdownDescription":"Whethertoshowthe`Run`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.lens.run.interactive.enable":{"default":false,"markdownDescription":"Whethertoshowthe`RunInteractive`lenses.Onlyapplieswhen\n`#elp.lens.enable#`isset.","type":"boolean"},"elp.log":{"default":"error","markdownDescription":"ConfigureLSP-basedloggingusingenv_loggersyntax.","type":"string"},"elp.signatureHelp.enable":{"default":true,"markdownDescription":"WhethertoshowSignatureHelp.","type":"boolean"},"elp.typesOnHover.enable":{"default":false,"markdownDescription":"Displaytypeswhenhoveringoverexpressions.","type":"boolean"},"#]]
.assert_eq(s.as_str());
expect![[r#"
"elp.buck.quickStart": {
"default": false,
"markdownDescription": "Whether to use the expermintal `buck2 targets` quick start process.",
"type": "boolean"
},
"elp.diagnostics.disabled": {
"default": [],
"items": {
@ -716,11 +675,6 @@ mod tests {
"markdownDescription": "Whether to show the `Link` lenses. Only applies when\n`#elp.lens.enable#` is set.",
"type": "boolean"
},
"elp.lens.logview.links": {
"default": false,
"markdownDescription": "Whether to enable LogView lens links.",
"type": "boolean"
},
"elp.lens.run.coverage.enable": {
"default": true,
"markdownDescription": "Display code coverage information when running tests via the\nCode Lenses. Only applies when `#elp.lens.enabled` and\n`#elp.lens.run.enable#` are set.",
@ -736,16 +690,6 @@ mod tests {
"markdownDescription": "Whether to show the `Run Interactive` lenses. Only applies when\n`#elp.lens.enable#` is set.",
"type": "boolean"
},
"elp.lens.scuba.links": {
"default": false,
"markdownDescription": "Whether to enable Scuba lens links.",
"type": "boolean"
},
"elp.lens.wam.links": {
"default": false,
"markdownDescription": "Whether to enable WAM lens links.",
"type": "boolean"
},
"elp.log": {
"default": "error",
"markdownDescription": "Configure LSP-based logging using env_logger syntax.",

View file

@ -26,7 +26,6 @@ use elp_ide::elp_ide_db::assists::AssistContextDiagnostic;
use elp_ide::elp_ide_db::assists::AssistContextDiagnosticCode;
use elp_ide::elp_ide_db::elp_base_db::AbsPath;
use elp_ide::elp_ide_db::elp_base_db::AbsPathBuf;
use elp_ide::elp_ide_db::elp_base_db::FileId;
use elp_ide::elp_ide_db::elp_base_db::VfsPath;
use lsp_types::DiagnosticRelatedInformation;
use lsp_types::Location;
@ -68,14 +67,11 @@ pub fn diagnostic_severity(severity: Severity) -> lsp_types::DiagnosticSeverity
}
}
pub fn ide_to_lsp_diagnostic<F>(
pub fn ide_to_lsp_diagnostic(
line_index: &LineIndex,
url: &Url,
d: &Diagnostic,
get_file_info: F,
) -> lsp_types::Diagnostic
where
F: Fn(FileId) -> Option<(LineIndex, Url)>,
{
) -> lsp_types::Diagnostic {
let code_description = match &d.code_doc_uri {
Some(uri) => match lsp_types::Url::parse(uri) {
Ok(href) => Some(lsp_types::CodeDescription { href }),
@ -94,16 +90,17 @@ where
code_description,
source,
message: d.message.clone(),
related_information: from_related(get_file_info, &d.related_info),
tags: d.tag.as_ref().map(lsp_diagnostic_tags),
related_information: from_related(line_index, url, &d.related_info),
tags: lsp_diagnostic_tags(&d.tag),
data: None,
}
}
fn lsp_diagnostic_tags(d: &DiagnosticTag) -> Vec<lsp_types::DiagnosticTag> {
fn lsp_diagnostic_tags(d: &DiagnosticTag) -> Option<Vec<lsp_types::DiagnosticTag>> {
match d {
DiagnosticTag::Unused => vec![lsp_types::DiagnosticTag::UNNECESSARY],
DiagnosticTag::Deprecated => vec![lsp_types::DiagnosticTag::DEPRECATED],
DiagnosticTag::None => None,
DiagnosticTag::Unused => Some(vec![lsp_types::DiagnosticTag::UNNECESSARY]),
DiagnosticTag::Deprecated => Some(vec![lsp_types::DiagnosticTag::DEPRECATED]),
}
}
@ -126,14 +123,22 @@ pub fn eqwalizer_to_arc_diagnostic(
d: &EqwalizerDiagnostic,
line_index: &LineIndex,
relative_path: &Path,
eqwalizer_enabled: bool,
) -> arc_types::Diagnostic {
let pos = position(line_index, d.range.start());
let line_num = pos.line + 1;
let character = Some(pos.character + 1);
let severity = arc_types::Severity::Error;
let severity = if eqwalizer_enabled {
arc_types::Severity::Error
} else {
// We use Severity::Disabled so that we have the ability in our arc linter to choose
// to display lints for *new* files with errors that are not opted in (T118466310).
// See comment at the top of eqwalizer_cli.rs for more information.
arc_types::Severity::Disabled
};
// formatting: https://fburl.com/max_wiki_link_to_phabricator_rich_text
let explanation = match &d.explanation {
Some(s) => format!("```\n{s}\n```"),
Some(s) => format!("```\n{}\n```", s),
None => "".to_string(),
};
let link = format!("> [docs on `{}`]({})", d.code, d.uri);
@ -158,30 +163,25 @@ pub fn eqwalizer_to_arc_diagnostic(
name,
message,
d.expression.clone(),
None,
)
}
fn from_related<F>(
get_file_info: F,
fn from_related(
line_index: &LineIndex,
url: &Url,
r: &Option<Vec<RelatedInformation>>,
) -> Option<Vec<DiagnosticRelatedInformation>>
where
F: Fn(elp_ide::elp_ide_db::elp_base_db::FileId) -> Option<(LineIndex, Url)>,
{
) -> Option<Vec<DiagnosticRelatedInformation>> {
r.as_ref().map(|ri| {
ri.iter()
.filter_map(|i| {
// Get the line index and URL for the file that contains the related information
let (line_index, uri) = get_file_info(i.file_id)?;
.map(|i| {
let location = Location {
range: range(&line_index, i.range),
uri,
range: range(line_index, i.range),
uri: url.clone(),
};
Some(DiagnosticRelatedInformation {
DiagnosticRelatedInformation {
location,
message: i.message.clone(),
})
}
})
.collect()
})
@ -251,7 +251,6 @@ pub fn ide_to_arc_diagnostic(
None => message,
};
let severity = diagnostic.severity(use_cli_severity);
let doc_path = diagnostic.code.as_doc_path();
arc_types::Diagnostic::new(
path,
line_num,
@ -260,6 +259,5 @@ pub fn ide_to_arc_diagnostic(
diagnostic.code.as_labeled_code(),
description,
None,
doc_path,
)
}

View file

@ -17,7 +17,6 @@ use std::time::SystemTime;
use anyhow::Result;
use anyhow::bail;
use elp_ide::Cancellable;
use elp_ide::DocResult;
use elp_ide::HighlightedRange;
use elp_ide::NavigationTarget;
use elp_ide::RangeInfo;
@ -33,8 +32,6 @@ use elp_ide::elp_ide_db::elp_base_db::FilePosition;
use elp_ide::elp_ide_db::elp_base_db::FileRange;
use elp_ide::elp_ide_db::elp_base_db::ProjectId;
use elp_log::telemetry;
use elp_log::timeit_with_telemetry;
use elp_syntax::SmolStr;
use itertools::Itertools;
use lsp_server::ErrorCode;
use lsp_types::CallHierarchyIncomingCall;
@ -67,7 +64,6 @@ use crate::convert::lsp_to_assist_context_diagnostic;
use crate::from_proto;
use crate::lsp_ext;
use crate::snapshot::Snapshot;
use crate::snapshot::TelemetryData;
use crate::to_proto;
pub(crate) fn handle_code_action(
@ -218,7 +214,7 @@ fn parse_action_id(action_id: &str) -> Result<(usize, SingleResolve), String> {
let assist_kind: AssistKind = assist_kind_string.parse()?;
let index: usize = match index_string.parse() {
Ok(index) => index,
Err(e) => return Err(format!("Incorrect index string: {e}")),
Err(e) => return Err(format!("Incorrect index string: {}", e)),
};
Ok((
index,
@ -343,22 +339,16 @@ fn goto_definition_telemetry(snap: &Snapshot, targets: &[NavigationTarget], star
.iter()
.map(|tgt| snap.file_id_to_url(tgt.file_id))
.collect();
let target_names: Vec<_> = targets.iter().map(|tgt| tgt.name.clone()).collect();
let target_kinds: Vec<_> = targets.iter().map(|tgt| tgt.kind).collect();
#[derive(serde::Serialize)]
struct Data {
targets_include_generated: bool,
target_urls: Vec<Url>,
target_names: Vec<SmolStr>,
target_kinds: Vec<SymbolKind>,
}
let detail = Data {
targets_include_generated,
target_urls,
target_names,
target_kinds,
};
let duration = start.elapsed().map(|e| e.as_millis()).unwrap_or(0) as u32;
let data = serde_json::to_value(detail).unwrap_or_else(|err| {
@ -367,24 +357,6 @@ fn goto_definition_telemetry(snap: &Snapshot, targets: &[NavigationTarget], star
telemetry::send_with_duration("goto_definition".to_string(), data, duration, start);
}
fn send_hover_telemetry(doc_result: &DocResult) {
#[derive(serde::Serialize)]
struct Data {
docs_found: bool,
text: String,
kind: String,
}
let detail = Data {
docs_found: doc_result.doc.is_some(),
text: doc_result.token_text.clone(),
kind: format!("{:?}", doc_result.token_kind),
};
let data = serde_json::to_value(detail).unwrap_or_else(|err| {
serde_json::Value::String(format!("JSON serialization failed: {err}"))
});
telemetry::send("hover".to_string(), data);
}
pub(crate) fn handle_goto_type_definition(
snap: Snapshot,
params: lsp_types::GotoDefinitionParams,
@ -414,16 +386,10 @@ pub(crate) fn handle_references(
params: lsp_types::ReferenceParams,
) -> Result<Option<Vec<lsp_types::Location>>> {
let _p = tracing::info_span!("handle_references").entered();
let _timer = timeit_with_telemetry!(TelemetryData::References {
file_url: params.text_document_position.text_document.uri.clone(),
position: params.text_document_position.position
});
let mut position = from_proto::file_position(&snap, params.text_document_position)?;
position.offset = snap
.analysis
.clamp_offset(position.file_id, position.offset)?;
let refs = match snap.analysis.find_all_refs(position)? {
None => return Ok(None),
Some(it) => it,
@ -447,7 +413,6 @@ pub(crate) fn handle_references(
.chain(decl)
})
.collect();
Ok(Some(locations))
}
@ -484,10 +449,8 @@ pub(crate) fn handle_completion_resolve(
position.offset = snap
.analysis
.clamp_offset(position.file_id, position.offset)?;
if let Ok(Some(doc_result)) = snap.analysis.get_docs_at_position(position)
&& let Some(doc) = doc_result.doc
{
let docs = doc.markdown_text().to_string();
if let Ok(Some(res)) = snap.analysis.get_docs_at_position(position) {
let docs = res.0.markdown_text().to_string();
let documentation =
lsp_types::Documentation::MarkupContent(lsp_types::MarkupContent {
kind: lsp_types::MarkupKind::Markdown,
@ -582,43 +545,38 @@ pub(crate) fn handle_hover(snap: Snapshot, params: HoverParams) -> Result<Option
let mut docs: Vec<(Doc, Option<FileRange>)> = Vec::default();
if snap.config.types_on_hover()
&& let Some(type_info) = snap.analysis.type_at_position(query_range)?
{
let (ty, range) = &*type_info;
let text = &snap.analysis.file_text(range.file_id)?[range.range];
let type_doc = Doc::new(format!("```erlang\n{text} :: {ty}\n```\n"));
docs.push((type_doc, Some(range.to_owned())));
let refs = snap.analysis.type_references(range.file_id, ty)?;
if !refs.is_empty() {
let goto_list = refs
.into_iter()
.flat_map(|(name, range)| {
to_proto::location(&snap, range)
.map(|loc| {
format!(
"[{}]({}#L{}-{})",
name,
loc.uri,
loc.range.start.line + 1,
loc.range.end.line + 1
)
})
.ok()
})
.join(" | ");
let goto_docs = Doc::new(format!("Go to: {goto_list}"));
docs.push((goto_docs, None));
if snap.config.types_on_hover() {
if let Some(type_info) = snap.analysis.type_at_position(query_range)? {
let (ty, range) = &*type_info;
let text = &snap.analysis.file_text(range.file_id)?[range.range];
let type_doc = Doc::new(format!("```erlang\n{} :: {}\n```\n", text, ty));
docs.push((type_doc, Some(range.to_owned())));
let refs = snap.analysis.type_references(range.file_id, ty)?;
if !refs.is_empty() {
let goto_list = refs
.into_iter()
.flat_map(|(name, range)| {
to_proto::location(&snap, range)
.map(|loc| {
format!(
"[{}]({}#L{}-{})",
name,
loc.uri,
loc.range.start.line + 1,
loc.range.end.line + 1
)
})
.ok()
})
.join(" | ");
let goto_docs = Doc::new(format!("Go to: {}", goto_list));
docs.push((goto_docs, None));
}
}
}
if let Some(doc_result) = snap.analysis.get_docs_at_position(position)? {
send_hover_telemetry(&doc_result);
let doc = doc_result.doc;
if let Some(doc) = doc {
let range = doc_result.token_range;
docs.push((doc, Some(range)));
}
if let Some(hover) = snap.analysis.get_docs_at_position(position)? {
docs.push(hover);
}
if let Some(macro_expansion) = snap.analysis.expand_macro(position)? {
@ -886,12 +844,12 @@ pub(crate) fn handle_semantic_tokens_full_delta(
let mut cache = snap.semantic_tokens_cache.lock();
let cached_tokens = cache.entry(params.text_document.uri).or_default();
if let Some(prev_id) = &cached_tokens.result_id
&& *prev_id == params.previous_result_id
{
let delta = to_proto::semantic_token_delta(cached_tokens, &semantic_tokens);
*cached_tokens = semantic_tokens;
return Ok(Some(delta.into()));
if let Some(prev_id) = &cached_tokens.result_id {
if *prev_id == params.previous_result_id {
let delta = to_proto::semantic_token_delta(cached_tokens, &semantic_tokens);
*cached_tokens = semantic_tokens;
return Ok(Some(delta.into()));
}
}
*cached_tokens = semantic_tokens.clone();

View file

@ -37,7 +37,7 @@ pub mod line_endings;
pub mod lsp_ext;
mod mem_docs;
pub mod memory_usage;
// @fb-only: mod meta_only;
// @fb-only
mod op_queue;
mod project_loader;
pub mod reload;
@ -108,7 +108,7 @@ pub fn otp_file_to_ignore(db: &Analysis, file_id: FileId) -> bool {
"redbug_dtop",
]
.iter()
// @fb-only: .chain(meta_only::FILES_TO_IGNORE.iter())
// @fb-only
.map(SmolStr::new)
.collect();
}
@ -159,16 +159,12 @@ pub fn read_lint_config_file(project: &Path, config_file: &Option<String>) -> Re
mod tests {
use elp_ide::FunctionMatch;
use elp_ide::diagnostics::DiagnosticCode;
use elp_ide::diagnostics::ErlangServiceConfig;
use elp_ide::diagnostics::Lint;
use elp_ide::diagnostics::LintsFromConfig;
use elp_ide::diagnostics::MatchSsr;
use elp_ide::diagnostics::ReplaceCall;
use elp_ide::diagnostics::ReplaceCallAction;
use elp_ide::diagnostics::Replacement;
use elp_ide::diagnostics::Severity;
use expect_test::expect;
use fxhash::FxHashMap;
use crate::LintConfig;
@ -189,23 +185,12 @@ mod tests {
perm: vec![1, 2],
}),
}),
Lint::LintMatchSsr(MatchSsr {
ssr_pattern: "ssr: _@A = 10.".to_string(),
message: None,
strategy: None,
severity: None,
}),
],
},
linters: FxHashMap::default(),
erlang_service: ErlangServiceConfig::default(),
};
expect![[r#"
enabled_lints = ["W0011"]
disabled_lints = []
[erlang_service]
warnings_as_errors = false
[[ad_hoc_lints.lints]]
type = "ReplaceCall"
@ -229,12 +214,6 @@ mod tests {
action = "Replace"
type = "ArgsPermutation"
perm = [1, 2]
[[ad_hoc_lints.lints]]
type = "LintMatchSsr"
ssr_pattern = "ssr: _@A = 10."
[linters]
"#]]
.assert_eq(&toml::to_string::<LintConfig>(&lint_config).unwrap());
}
@ -248,21 +227,4 @@ mod tests {
assert_eq!(config.enabled_lints, vec![]);
assert_eq!(config.disabled_lints, vec![]);
}
#[test]
fn serde_read_lint_config_linters_overrides() {
let content = r#"
[linters.no_garbage_collect]
severity = "error"
"#;
let config = toml::from_str::<LintConfig>(content).unwrap();
assert_eq!(
config
.linters
.get(&DiagnosticCode::NoGarbageCollect)
.unwrap()
.severity,
Some(Severity::Error)
);
}
}

View file

@ -148,10 +148,11 @@ impl Runnable {
}
}
pub fn rebar3_ct(
pub fn rebar3_test(
runnable: elp_ide::Runnable,
location: Option<lsp_types::LocationLink>,
workspace_root: PathBuf,
_coverage_enabled: bool,
) -> Self {
Self {
label: "Rebar3".to_string(),
@ -164,23 +165,6 @@ impl Runnable {
}),
}
}
pub fn rebar3_shell(
_runnable: elp_ide::Runnable,
location: Option<lsp_types::LocationLink>,
workspace_root: PathBuf,
) -> Self {
Self {
label: "Rebar3".to_string(),
location,
kind: RunnableKind::Rebar3,
args: RunnableArgs::Rebar3(Rebar3RunnableArgs {
workspace_root,
command: "as".to_string(),
args: vec!["test".to_string(), "shell".to_string()],
}),
}
}
}
#[derive(Serialize, Deserialize, Debug)]

View file

@ -18,7 +18,6 @@ use elp_log::telemetry;
use elp_project_model::ElpConfig;
use elp_project_model::IncludeParentDirs;
use elp_project_model::ProjectManifest;
use elp_project_model::buck::BuckQueryConfig;
use elp_project_model::otp::Otp;
use fxhash::FxHashMap;
use fxhash::FxHashSet;
@ -50,15 +49,12 @@ impl ProjectLoader {
let mut result = false;
for path in paths {
let mut path_it: &AbsPath = path.as_ref();
loop {
if self.project_roots.remove(path_it).is_some() {
while let Some(path) = path_it.parent() {
if self.project_roots.remove(path).is_some() {
result = true;
break;
}
match path_it.parent() {
Some(parent) => path_it = parent,
None => break,
}
path_it = path;
}
}
result
@ -99,16 +95,12 @@ impl ProjectLoader {
path: &AbsPath,
) -> Option<(ElpConfig, Result<ProjectManifest>, ProjectManifest)> {
let mut path_it = path;
loop {
if self.project_roots.contains_key(path_it) {
while let Some(path) = path_it.parent() {
if self.project_roots.contains_key(path) {
return None;
}
match path_it.parent() {
Some(parent) => path_it = parent,
None => break,
}
path_it = path;
}
Some(self.load_manifest(path))
}
@ -122,33 +114,12 @@ impl ProjectLoader {
}
}
/// If using buck quick start, it happens in two stages, first to
/// get the basic project config, then to invoke the generation of
/// any artifacts that will become part of the project.
#[derive(Debug, PartialEq, Eq)]
pub enum BuckGenerated {
/// Initial value
NoLoadDone,
/// After first load (buck targets)
NoGenerated,
/// After second load (elp.bxl)
Generated,
}
pub struct ReloadManager {
/// Files that have changed since the last reload.
changed_files: FxHashSet<AbsPathBuf>,
/// This field is updated when a `changed_files` file is added.
/// It allows us to wait until the last file has been added
/// when a branch switch is done to avoid doing a reload for each.
/// We wait until RELOAD_QUIESCENT_WAIT_TIME has elapsed before doing
/// the reload.
last_change: SystemTime,
/// ReloadManager clients should ensure this is set when a reload
/// task is active, reset when done.
reload_in_progress: bool,
buck_generated: BuckGenerated,
buck_quick_start: bool,
}
/// How long to wait after the last changed file was added before
@ -156,76 +127,24 @@ pub struct ReloadManager {
const RELOAD_QUIESCENT_WAIT_TIME: Duration = Duration::from_millis(500);
impl ReloadManager {
pub fn new(buck_quick_start: bool) -> ReloadManager {
pub fn new() -> ReloadManager {
ReloadManager {
changed_files: FxHashSet::default(),
last_change: SystemTime::now(),
reload_in_progress: false,
buck_generated: BuckGenerated::NoLoadDone,
buck_quick_start,
}
}
/// Used to check if any files are queued, and if so cancel an
/// existing reload
pub fn ok_to_switch_workspace(&self) -> bool {
if self.buck_quick_start {
// `BuckGenerated::NoLoadDone` or `BuckGenerated::NoGenerated`.
if self.buck_generated == BuckGenerated::NoLoadDone {
// We are doing a 2-stage load, and have just completed the `buck targets` step.
// So time to activate the Project, this is the whole point of the two stage process
true
} else {
self.changed_files.is_empty()
}
} else {
// Do not switch if there are files which will trigger a reload.
// This lets us start that process sooner without wasted effort
// switching when it is going to change anyway.
self.changed_files.is_empty()
}
pub fn has_changed_files(&self) -> bool {
!self.changed_files.is_empty()
}
pub fn set_reload_active(&mut self) -> BuckQueryConfig {
pub fn set_reload_active(&mut self) {
self.reload_in_progress = true;
self.get_query_config()
}
pub fn get_query_config(&self) -> BuckQueryConfig {
if self.buck_quick_start {
match self.buck_generated {
BuckGenerated::NoLoadDone => BuckQueryConfig::BuckTargetsOnly,
BuckGenerated::NoGenerated => BuckQueryConfig::BuildGeneratedCode,
BuckGenerated::Generated => BuckQueryConfig::BuildGeneratedCode,
}
} else {
BuckQueryConfig::BuildGeneratedCode
}
}
/// This is called when the `Task::FetchProject` is done in `server.rs`,
/// but only after `switch_workspace_ok` has returned true.
pub fn set_reload_done(&mut self, a_file_per_project: FxHashSet<AbsPathBuf>) {
if self.buck_quick_start {
match &self.buck_generated {
BuckGenerated::NoLoadDone => {
if self.changed_files.is_empty() && !a_file_per_project.is_empty() {
// We have done the initial "buck targets" query on at least one Project,
// move on to doing `elp.bxl`
self.buck_generated = BuckGenerated::NoGenerated;
self.changed_files = a_file_per_project;
} else {
// We already have changed files from another source, so
// need to repeat this step. Do not change state.
}
}
BuckGenerated::NoGenerated => {
self.buck_generated = BuckGenerated::Generated;
}
BuckGenerated::Generated => {}
};
}
pub fn set_reload_done(&mut self) {
self.reload_in_progress = false;
}
@ -255,8 +174,4 @@ impl ReloadManager {
self.last_change = SystemTime::now();
}
}
pub fn set_buck_quickstart(&mut self, buck_quick_start: bool) {
self.buck_quick_start = buck_quick_start;
}
}

View file

@ -9,6 +9,7 @@
*/
use std::cmp;
use std::iter;
use elp_ide::elp_ide_db::elp_base_db::FileSetConfig;
use elp_ide::elp_ide_db::elp_base_db::ProjectApps;
@ -51,18 +52,14 @@ impl ProjectFolders {
.all_apps
.iter()
.flat_map(|(project_id, app)| {
let mut paths = Vec::new();
// Add all_dirs_to_watch() with project_id check and glob
for root in app.all_dirs_to_watch() {
if Some(*project_id) != project_apps.otp_project_id {
paths.push(format!("{root}/**/*.{{e,h}}rl"));
}
iter::repeat(project_id).zip(app.all_source_and_include_dirs())
})
.filter_map(|(project_id, root)| {
if Some(*project_id) != project_apps.otp_project_id {
Some(format!("{}/**/*.{{e,h}}rl", root))
} else {
None
}
// Add all_files_to_watch() directly, no project_id check or glob
for file in app.all_files_to_watch() {
paths.push(file.to_string());
}
paths
})
.collect();

View file

@ -1 +0,0 @@
Project Initialisation Failed: invalid or missing buck 2 configuration

View file

@ -1,7 +1,9 @@
[check_include_separate_1/include/include_with_bug.hrl] 5:14-5:18: E1507: undefined macro 'FOO'
[check_include_separate_1/include/top_includer.hrl] 3:10-3:30: E1516: can't find include file "does_not_exist.hrl"
Diagnostics reported:
Reporting all diagnostics codes
check_include/src/top_includer.erl:14:5-14:11::[Error] [E1508] undefined macro 'THIRD/2'
check_include/src/top_includer.erl:6:1-6:67::[Error] [L0000] Issue in included file
module specified: top_includer
Diagnostics reported in 2 modules:
app_a: 3
0:0-0:0::[Error] [W0012] Please add "-compile(warn_missing_spec_all)." to the module. If exported functions are not all specced, they need to be specced.
5:9-5:31::[WeakWarning] [W0037] Unspecific include.
0:8-0:13::[WeakWarning] [W0046] The module is not documented.
auto_gen_a: 2
0:0-0:0::[Error] [W0012] Please add "-compile(warn_missing_spec_all)." to the module. If exported functions are not all specced, they need to be specced.
0:8-0:18::[WeakWarning] [W0046] The module is not documented.

View file

@ -1,9 +1,4 @@
{
"elp.buck.quickStart": {
"default": false,
"markdownDescription": "Whether to use the expermintal `buck2 targets` quick start process.",
"type": "boolean"
},
"elp.diagnostics.disabled": {
"default": [],
"items": {
@ -93,11 +88,6 @@
"markdownDescription": "Whether to show the `Link` lenses. Only applies when\n`#elp.lens.enable#` is set.",
"type": "boolean"
},
"elp.lens.logview.links": {
"default": false,
"markdownDescription": "Whether to enable LogView lens links.",
"type": "boolean"
},
"elp.lens.run.coverage.enable": {
"default": true,
"markdownDescription": "Display code coverage information when running tests via the\nCode Lenses. Only applies when `#elp.lens.enabled` and\n`#elp.lens.run.enable#` are set.",
@ -113,16 +103,6 @@
"markdownDescription": "Whether to show the `Run Interactive` lenses. Only applies when\n`#elp.lens.enable#` is set.",
"type": "boolean"
},
"elp.lens.scuba.links": {
"default": false,
"markdownDescription": "Whether to enable Scuba lens links.",
"type": "boolean"
},
"elp.lens.wam.links": {
"default": false,
"markdownDescription": "Whether to enable WAM lens links.",
"type": "boolean"
},
"elp.log": {
"default": "error",
"markdownDescription": "Configure LSP-based logging using env_logger syntax.",

View file

@ -1,138 +0,0 @@
Reporting all diagnostics codes
Diagnostics reported:
app_a/src/app_a.erl:52:3-52:23::[Warning] [W0006] this statement has no effect
app_a/src/app_a.erl:3:10-3:21::[WeakWarning] [W0037] Unspecific include.
app_a/src/app_a.erl:27:3-27:9::[Warning] [W0017] Function 'foo:ok/0' is undefined.
app_a/src/app_a.erl:28:4-28:11::[Warning] [W0017] Function 'mod:foo/0' is undefined.
app_a/src/app_a.erl:72:4-72:11::[Warning] [W0017] Function 'foo:bar/2' is undefined.
app_a/src/app_a.erl:37:11-37:28::[Warning] [W0017] Function 'mod_name:fun_name/2' is undefined.
app_a/src/app_a.erl:58:11-58:24::[WeakWarning] [W0051] Binary string can be written using sigil syntax.
app_a/src/app_a.erl:4:1-4:41::[Warning] [W0020] Unused file: inets/include/httpd.hrl
app_a/src/app_a.erl:39:7-39:28::[Error] [L1267] variable 'A' shadowed in 'named fun'
app_a/src/app_a.erl:55:32-55:35::[Error] [L1295] type uri/0 undefined
app_a/src/app_a.erl:56:20-56:26::[Error] [L1295] type binary/1 undefined
app_a/src/app_a.erl:72:3-72:34::[Error] [L1252] record record undefined
app_a/src/app_a.erl:75:5-75:16::[Error] [L1252] record record undefined
app_a/src/app_a.erl:35:1-35:2::[Warning] [L1230] function g/1 is unused
app_a/src/app_a.erl:35:3-35:4::[Warning] [L1268] variable 'A' is unused
app_a/src/app_a.erl:36:3-36:4::[Warning] [L1268] variable 'F' is unused
app_a/src/app_a.erl:37:3-37:4::[Warning] [L1268] variable 'G' is unused
app_a/src/app_a.erl:38:3-38:4::[Warning] [L1268] variable 'H' is unused
app_a/src/app_a.erl:39:3-39:4::[Warning] [L1268] variable 'I' is unused
app_a/src/app_a.erl:39:7-39:28::[Warning] [L1268] variable 'A' is unused
app_a/src/app_a.erl:41:1-41:2::[Warning] [L1230] function h/0 is unused
app_a/src/app_a.erl:45:1-45:2::[Warning] [L1230] function i/0 is unused
app_a/src/app_a.erl:50:1-50:2::[Warning] [L1230] function j/2 is unused
app_a/src/app_a.erl:50:15-50:16::[Warning] [L1268] variable 'A' is unused
app_a/src/app_a.erl:50:23-50:24::[Warning] [L1268] variable 'B' is unused
app_a/src/app_a.erl:55:1-55:46::[Warning] [L1296] type session(_) is unused
app_a/src/app_a.erl:55:1-55:46::[Warning] [L1313] opaque type session(_) is not exported
app_a/src/app_a.erl:56:7-56:13::[Warning] [L1296] type source(_) is unused
app_a/src/app_a.erl:58:1-58:4::[Warning] [L1230] function map/2 is unused
app_a/src/app_a.erl:60:1-60:9::[Warning] [L1230] function with_dot/0 is unused
app_a/src/app_a.erl:62:1-62:9::[Warning] [L1230] function lang_dir/1 is unused
app_a/src/app_a.erl:66:1-66:7::[Warning] [L1230] function escape/1 is unused
app_a/src/app_a.erl:66:13-66:17::[Warning] [L1268] variable 'T' is unused
app_a/src/app_a.erl:67:9-67:25::[Warning] [L1260] record all_configs_file is unused
app_a/src/app_a.erl:71:1-71:2::[Warning] [L1230] function k/0 is unused
app_a/src/app_a.erl:74:1-74:2::[Warning] [L1230] function l/1 is unused
app_a/src/app_a.erl:77:1-77:2::[Warning] [L1230] function m/0 is unused
app_a/src/broken_parse_trans.erl:10:21-10:22::[Error] [L1256] field b undefined in record a
app_a/src/broken_parse_trans.erl:10:32-10:33::[Error] [L1262] variable 'B' is unbound
app_a/src/cascading.erl:9:5-9:6::[Error] [W0004] Missing ')'
3:10-3:15: function foo/0 undefined
6:10-6:15: function foo/0 undefined
8:7-8:10: spec for undefined function foo/0
app_a/src/diagnostics.erl:3:10-3:27::[WeakWarning] [W0037] Unspecific include.
app_a/src/diagnostics.erl:4:10-4:34::[WeakWarning] [W0037] Unspecific include.
app_a/src/diagnostics.erl:12:8-12:12::[Warning] [W0060] Match on a bound variable
app_a/src/diagnostics.erl:4:1-4:36::[Error] [L0000] Issue in included file
[app_a/include/broken_diagnostics.hrl] 1:8-1:15: P1702: bad attribute
[app_a/include/broken_diagnostics.hrl] 3:6-3:15: P1702: bad attribute
app_a/src/diagnostics.erl:6:31-6:45::[Error] [L1295] type undefined_type/0 undefined
app_a/src/diagnostics.erl:7:1-7:5::[Warning] [L1230] function main/1 is unused
app_a/src/diagnostics.erl:10:1-10:4::[Warning] [L1230] function foo/0 is unused
app_a/src/lint_recursive.erl:23:5-23:14::[Warning] [W0006] this statement has no effect
app_a/src/lint_recursive.erl:6:5-6:7::[Warning] [W0006] this statement has no effect
app_a/src/lint_recursive.erl:14:5-14:12::[Warning] [L1268] variable 'Config1' is unused
app_a/src/lint_recursive.erl:19:5-19:12::[Warning] [L1268] variable 'Config1' is unused
app_a/src/lints.erl:5:1-5:14::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch'
4:1-4:14: Mismatched clause name
app_a/src/lints.erl:4:22-4:23::[Warning] [W0018] Unexpected ';'
app_a/src/lints.erl:2:10-2:25::[Error] [L1227] function head_mismatch/1 undefined
app_a/src/otp27_docstrings.erl:34:9-34:24::[Warning] [W0002] Unused macro (THIS_IS_THE_END)
app_a/src/otp27_docstrings.erl:24:5-24:6::[Warning] [W0060] Match on a bound variable
app_a/src/otp27_docstrings.erl:30:5-30:6::[Warning] [W0060] Match on a bound variable
app_a/src/otp27_sigils.erl:11:6-11:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:12:5-12:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:13:5-13:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:14:5-14:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:15:5-15:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:17:6-17:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:18:5-18:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:19:5-19:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:20:5-20:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:21:5-21:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:23:6-23:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:24:5-24:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:25:5-25:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:26:5-26:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:27:5-27:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:29:6-29:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:30:5-30:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:31:5-31:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:32:5-32:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:33:5-33:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:35:6-35:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:36:5-36:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:37:5-37:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:38:5-38:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:39:5-39:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:41:6-41:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:42:5-42:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:43:5-43:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:44:5-44:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:45:5-45:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:47:6-47:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:48:5-48:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:49:5-49:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:50:5-50:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:51:5-51:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:53:6-53:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:53:6-53:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:54:5-54:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:54:5-54:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:55:5-55:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:55:5-55:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:56:5-56:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:57:5-57:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:59:6-59:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:60:5-60:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:61:5-61:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:62:5-62:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:63:5-63:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:65:6-65:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:66:5-66:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:67:5-67:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:68:5-68:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:69:5-69:24::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:76:5-79:8::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:76:5-79:8::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:80:5-84:9::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:80:5-84:9::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:85:5-89:10::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:85:5-89:10::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:90:5-94:11::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:95:5-99:12::[Warning] [W0006] this statement has no effect
app_a/src/otp27_sigils.erl:102:5-102:24::[WeakWarning] [W0051] Binary string can be written using sigil syntax.
app_a/src/otp27_sigils.erl:128:9-128:24::[Warning] [W0002] Unused macro (THIS_IS_THE_END)
app_a/src/otp27_sigils.erl:112:4-112:5::[Error] [P1711] syntax error before: X
4:15-4:18: function g/0 undefined
74:7-74:8: spec for undefined function g/0
app_a/src/otp27_sigils.erl:71:5-71:6::[Warning] [L1268] variable 'X' is unused
app_a/src/otp_7655.erl:5:1-5:28::[Error] [L1201] no module definition
app_a/src/parse_error_a_cascade.erl:10:20-11:1::[Error] [W0004] Missing 'atom'
6:6-6:11: function bar/0 undefined
app_a/src/suppressed.erl:8:5-8:9::[Warning] [L1268] variable 'Life' is unused
app_a/src/syntax.erl:5:46-5:47::[Error] [P1711] syntax error before: ')'
app_a/src/syntax.erl:11:9-11:10::[Error] [W0004] Missing ')'

View file

@ -1,3 +1,4 @@
module specified: suppressed
Diagnostics reported:
app_a/src/suppressed.erl:8:5-8:9::[Warning] [W0007] match is redundant
Diagnostics reported in 1 modules:
suppressed: 1
7:4-7:8::[Warning] [W0007] match is redundant

View file

@ -1,10 +1,9 @@
module specified: diagnostics
Diagnostics reported in 1 modules:
diagnostics: 7
diagnostics: 6
2:9-2:26::[Hint] [W0037] Unspecific include.
3:0-3:35::[Error] [L0000] Issue in included file
3:0-3:0::[Error] [L0000] Issue in included file
3:9-3:33::[Hint] [W0037] Unspecific include.
5:30-5:44::[Error] [L1295] type undefined_type/0 undefined
6:0-6:4::[Warning] [L1230] function main/1 is unused
9:0-9:3::[Warning] [L1230] function foo/0 is unused
11:7-11:11::[Warning] [W0060] Match on a bound variable

View file

@ -1,5 +0,0 @@
module specified: diagnostics
Diagnostics reported in 1 modules:
diagnostics: 2
3:0-3:35::[Error] [L0000] Issue in included file
5:30-5:44::[Error] [L1295] type undefined_type/0 undefined

View file

@ -1,7 +1,6 @@
{"path":"app_a/src/diagnostics.erl","line":3,"char":10,"code":"ELP","severity":"disabled","name":"W0037 (unspecific_include)","original":null,"replacement":null,"description":"Unspecific include.\n\nFor more information see: /erlang-error-index/w/W0037","docPath":"website/docs/erlang-error-index/w/W0037.md"}
{"path":"app_a/src/diagnostics.erl","line":4,"char":1,"code":"ELP","severity":"error","name":"L0000 (L0000)","original":null,"replacement":null,"description":"Issue in included file\n\nFor more information see: /erlang-error-index/l/L0000","docPath":null}
{"path":"app_a/src/diagnostics.erl","line":4,"char":10,"code":"ELP","severity":"disabled","name":"W0037 (unspecific_include)","original":null,"replacement":null,"description":"Unspecific include.\n\nFor more information see: /erlang-error-index/w/W0037","docPath":"website/docs/erlang-error-index/w/W0037.md"}
{"path":"app_a/src/diagnostics.erl","line":6,"char":31,"code":"ELP","severity":"error","name":"L1295 (L1295)","original":null,"replacement":null,"description":"type undefined_type/0 undefined\n\nFor more information see: /erlang-error-index/l/L1295","docPath":null}
{"path":"app_a/src/diagnostics.erl","line":7,"char":1,"code":"ELP","severity":"warning","name":"L1230 (L1230)","original":null,"replacement":null,"description":"function main/1 is unused\n\nFor more information see: /erlang-error-index/l/L1230","docPath":null}
{"path":"app_a/src/diagnostics.erl","line":10,"char":1,"code":"ELP","severity":"warning","name":"L1230 (L1230)","original":null,"replacement":null,"description":"function foo/0 is unused\n\nFor more information see: /erlang-error-index/l/L1230","docPath":null}
{"path":"app_a/src/diagnostics.erl","line":12,"char":8,"code":"ELP","severity":"warning","name":"W0060 (bound_var_in_lhs)","original":null,"replacement":null,"description":"Match on a bound variable\n\nFor more information see: /erlang-error-index/w/W0060","docPath":"website/docs/erlang-error-index/w/W0060.md"}
{"path":"app_a/src/diagnostics.erl","line":3,"char":10,"code":"ELP","severity":"disabled","name":"W0037 (unspecific_include)","original":null,"replacement":null,"description":"Unspecific include.\n\nFor more information see: /erlang-error-index/w/W0037"}
{"path":"app_a/src/diagnostics.erl","line":4,"char":1,"code":"ELP","severity":"error","name":"L0000 (L0000)","original":null,"replacement":null,"description":"Issue in included file\n\nFor more information see: /erlang-error-index/l/L0000"}
{"path":"app_a/src/diagnostics.erl","line":4,"char":10,"code":"ELP","severity":"disabled","name":"W0037 (unspecific_include)","original":null,"replacement":null,"description":"Unspecific include.\n\nFor more information see: /erlang-error-index/w/W0037"}
{"path":"app_a/src/diagnostics.erl","line":6,"char":31,"code":"ELP","severity":"error","name":"L1295 (L1295)","original":null,"replacement":null,"description":"type undefined_type/0 undefined\n\nFor more information see: /erlang-error-index/l/L1295"}
{"path":"app_a/src/diagnostics.erl","line":7,"char":1,"code":"ELP","severity":"warning","name":"L1230 (L1230)","original":null,"replacement":null,"description":"function main/1 is unused\n\nFor more information see: /erlang-error-index/l/L1230"}
{"path":"app_a/src/diagnostics.erl","line":10,"char":1,"code":"ELP","severity":"warning","name":"L1230 (L1230)","original":null,"replacement":null,"description":"function foo/0 is unused\n\nFor more information see: /erlang-error-index/l/L1230"}

View file

@ -1,12 +1,11 @@
module specified: lints
Diagnostics reported:
app_a/src/lints.erl:5:1-5:14::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch'
4:1-4:14: Mismatched clause name
Diagnostics reported in 1 modules:
lints: 1
4:0-4:13::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch'
---------------------------------------------
Applying fix in module 'lints' for
5:1-5:14::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch'
4:1-4:14: Mismatched clause name
4:0-4:13::[Error] [P1700] head mismatch 'head_mismatcX' vs 'head_mismatch'
@@ -1,6 +1,6 @@
-module(lints).
-export([head_mismatch/1]).

View file

@ -1 +1 @@
{"path":"app_a/src/lints.erl","line":5,"char":1,"code":"ELP","severity":"error","name":"P1700 (head_mismatch)","original":null,"replacement":null,"description":"head mismatch 'head_mismatcX' vs 'head_mismatch'\n\nFor more information see: /erlang-error-index/p/P1700","docPath":null}
{"path":"app_a/src/lints.erl","line":5,"char":1,"code":"ELP","severity":"error","name":"P1700 (head_mismatch)","original":null,"replacement":null,"description":"head mismatch 'head_mismatcX' vs 'head_mismatch'\n\nFor more information see: /erlang-error-index/p/P1700"}

View file

@ -1,11 +1,12 @@
module specified: lint_recursive
Diagnostics reported:
app_a/src/lint_recursive.erl:19:5-19:12::[Warning] [W0007] match is redundant
app_a/src/lint_recursive.erl:14:5-14:12::[Warning] [W0007] match is redundant
Diagnostics reported in 1 modules:
lint_recursive: 2
18:4-18:11::[Warning] [W0007] match is redundant
13:4-13:11::[Warning] [W0007] match is redundant
---------------------------------------------
Applying fix in module 'lint_recursive' for
19:5-19:12::[Warning] [W0007] match is redundant
18:4-18:11::[Warning] [W0007] match is redundant
@@ -16,7 +16,7 @@
test_foo2(Config) ->
@ -20,12 +21,12 @@ Applying fix in module 'lint_recursive' for
New filtered diagnostics
lint_recursive: 2
14:5-14:12::[Warning] [W0007] match is redundant
19:5-19:11::[Warning] [W0006] this statement has no effect
13:4-13:11::[Warning] [W0007] match is redundant
18:4-18:10::[Warning] [W0006] this statement has no effect
---------------------------------------------
Applying fix in module 'lint_recursive' for
14:5-14:12::[Warning] [W0007] match is redundant
13:4-13:11::[Warning] [W0007] match is redundant
@@ -11,7 +11,7 @@
%% something/0.
test_foo(Config) ->
@ -40,12 +41,12 @@ Applying fix in module 'lint_recursive' for
New filtered diagnostics
lint_recursive: 2
19:5-19:11::[Warning] [W0006] this statement has no effect
14:5-14:11::[Warning] [W0006] this statement has no effect
18:4-18:10::[Warning] [W0006] this statement has no effect
13:4-13:10::[Warning] [W0006] this statement has no effect
---------------------------------------------
Applying fix in module 'lint_recursive' for
19:5-19:11::[Warning] [W0006] this statement has no effect
18:4-18:10::[Warning] [W0006] this statement has no effect
@@ -16,7 +16,6 @@
test_foo2(Config) ->
@ -59,12 +60,12 @@ Applying fix in module 'lint_recursive' for
New filtered diagnostics
lint_recursive: 2
17:11-17:17::[Warning] [W0010] this variable is unused
14:5-14:11::[Warning] [W0006] this statement has no effect
16:10-16:16::[Warning] [W0010] this variable is unused
13:4-13:10::[Warning] [W0006] this statement has no effect
---------------------------------------------
Applying fix in module 'lint_recursive' for
17:11-17:17::[Warning] [W0010] this variable is unused
16:10-16:16::[Warning] [W0010] this variable is unused
@@ -14,7 +14,7 @@
Config,
clean_mocks().
@ -79,11 +80,11 @@ Applying fix in module 'lint_recursive' for
New filtered diagnostics
lint_recursive: 1
14:5-14:11::[Warning] [W0006] this statement has no effect
13:4-13:10::[Warning] [W0006] this statement has no effect
---------------------------------------------
Applying fix in module 'lint_recursive' for
14:5-14:11::[Warning] [W0006] this statement has no effect
13:4-13:10::[Warning] [W0006] this statement has no effect
@@ -11,7 +11,6 @@
%% something/0.
test_foo(Config) ->
@ -97,11 +98,11 @@ Applying fix in module 'lint_recursive' for
New filtered diagnostics
lint_recursive: 1
12:10-12:16::[Warning] [W0010] this variable is unused
11:9-11:15::[Warning] [W0010] this variable is unused
---------------------------------------------
Applying fix in module 'lint_recursive' for
12:10-12:16::[Warning] [W0010] this variable is unused
11:9-11:15::[Warning] [W0010] this variable is unused
@@ -9,7 +9,7 @@
%% We want to check that the "no effect" statements in test_foo/1 and
%% test_foo2/1 are removed, but not the ones in clean_mocks/0 and

View file

@ -1,6 +1,4 @@
module specified: otp27_docstrings
Diagnostics reported in 1 modules:
otp27_docstrings: 3
23:4-23:5::[Warning] [W0060] Match on a bound variable
29:4-29:5::[Warning] [W0060] Match on a bound variable
otp27_docstrings: 1
33:8-33:23::[Warning] [W0002] Unused macro (THIS_IS_THE_END)

View file

@ -1,2 +0,0 @@
module specified: erlang_diagnostics_errors_gen
No matches found

View file

@ -1,5 +0,0 @@
module specified: erlang_diagnostics_errors_gen
erlang_diagnostics_errors_gen: 1
6:5-6:7::[WeakWarning] [ad-hoc: ssr-match] SSR pattern matched: ssr: ok.
Matches found in 1 modules

View file

@ -1,10 +1,11 @@
Usage: [--project PROJECT] [--as PROFILE] [[--format FORMAT]] [--rebar] [--bail-on-error] [--stats] [--list-modules]
Usage: [--project PROJECT] [--as PROFILE] [[--format FORMAT]] [--rebar] [--include-tests] [--bail-on-error] [--stats] [--list-modules]
Available options:
--project <PROJECT> Path to directory with project, or to a JSON file (defaults to `.`)
--as <PROFILE> Rebar3 profile to pickup (default is test)
--format <FORMAT> Show diagnostics in JSON format
--rebar Run with rebar
--include-tests Also eqwalize test modules from project
--bail-on-error Exit with a non-zero status code if any errors are found
--stats Print statistics when done
--list-modules When printing statistics, include the list of modules parsed

View file

@ -1,4 +1,4 @@
Usage: [--project PROJECT] [--as PROFILE] [--rebar] [--bail-on-error] <APP>
Usage: [--project PROJECT] [--as PROFILE] [--include-tests] [--rebar] [--bail-on-error] <APP>
Available positional items:
<APP> app name
@ -6,6 +6,7 @@ Available positional items:
Available options:
--project <PROJECT> Path to directory with project, or to a JSON file (defaults to `.`)
--as <PROFILE> Rebar3 profile to pickup (default is test)
--include-tests Also eqwalize test modules from project
--rebar Run with rebar
--bail-on-error Exit with a non-zero status code if any errors are found
-h, --help Prints help information

View file

@ -1,9 +1,10 @@
Usage: [--project PROJECT] [--bail-on-error] <TARGET>
Usage: [--project PROJECT] [--include-tests] [--bail-on-error] <TARGET>
Available positional items:
<TARGET> target, like //erl/chatd/...
Available options:
--project <PROJECT> Path to directory with project, or to a JSON file (defaults to `.`)
--include-tests Also eqwalize test modules from project
--bail-on-error Exit with a non-zero status code if any errors are found
-h, --help Prints help information

View file

@ -1,9 +0,0 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ apps/app_b/src/app_b.erl:5:15
5 │ test_fun() -> type_error.
│ ^^^^^^^^^^ 'type_error'.
Expression has type: 'type_error'
Context expected type: 'ok'
1 ERROR

View file

@ -37,6 +37,12 @@ Because in the expression's type:
Here the type is: fun((term()) -> term())
Context expects type: fun((term(), term()) -> term())
------------------------------ Detailed message ------------------------------
fun((term()) -> term()) is not compatible with f2()
because
fun((term()) -> term()) is not compatible with fun((term(), term()) -> term())
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/any_fun_type.erl:64:1
@ -61,6 +67,16 @@ Because in the expression's type:
Differs from the expected type: 'a'
)
------------------------------ Detailed message ------------------------------
f5('a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with fun((...) -> 'a')
because
'a' | 'b' is not compatible with 'a'
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:98:20
@ -87,4 +103,14 @@ Because in the expression's type:
Differs from the expected type: 'a'
)
------------------------------ Detailed message ------------------------------
fun((term()) -> 'a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with fun((...) -> 'a')
because
'a' | 'b' is not compatible with 'a'
because
'b' is not compatible with 'a'
7 ERRORS

View file

@ -0,0 +1,116 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:46:21
46 │ to_f_any_neg2(F) -> F.
│ ^
│ │
│ F.
Expression has type: 'f0' | fun((atom()) -> pid()) | 'f1'
Context expected type: fun()
Because in the expression's type:
Here the type is a union type with some valid candidates: fun((atom()) -> pid())
However the following candidate: 'f0'
Differs from the expected type: fun()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:49:23
49 │ f_any_to_f0_neg(F) -> F.
│ ^ F.
Expression has type: fun()
Context expected type: 'f0'
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:61:31
61 │ when is_function(F, 1) -> F.
│ ^
│ │
│ F.
Expression has type: fun((term()) -> term())
Context expected type: f2()
Because in the expression's type:
Here the type is: fun((term()) -> term())
Context expects type: fun((term(), term()) -> term())
------------------------------ Detailed message ------------------------------
fun((term()) -> term()) is not compatible with f2()
because
fun((term()) -> term()) is not compatible with fun((term(), term()) -> term())
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/any_fun_type.erl:64:1
64 │ a_to_a(a) -> a.
│ ^^^^^^^^^^^^^^ Clause is not covered by spec
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:92:24
92 │ f5_to_f4_cov_neg(F) -> F.
│ ^
│ │
│ F.
Expression has type: f5('a' | 'b')
Context expected type: f4('a')
Because in the expression's type:
fun((term()) ->
Here the type is a union type with some valid candidates: 'a'
However the following candidate: 'b'
Differs from the expected type: 'a'
)
------------------------------ Detailed message ------------------------------
f5('a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with fun((...) -> 'a')
because
'a' | 'b' is not compatible with 'a'
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:98:20
98 │ apply_f4_neg(F) -> F(a).
│ ^^^^ F('a').
Expression has type: number()
Context expected type: boolean()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:113:22
113 │ fun3_to_f4_neg(F) -> F.
│ ^
│ │
│ F.
Expression has type: fun((term()) -> 'a' | 'b')
Context expected type: f4('a')
Because in the expression's type:
fun((term()) ->
Here the type is a union type with some valid candidates: 'a'
However the following candidate: 'b'
Differs from the expected type: 'a'
)
------------------------------ Detailed message ------------------------------
fun((term()) -> 'a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with fun((...) -> 'a')
because
'a' | 'b' is not compatible with 'a'
because
'b' is not compatible with 'a'
7 ERRORS

View file

@ -0,0 +1,116 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:46:21
46 │ to_f_any_neg2(F) -> F.
│ ^
│ │
│ F.
Expression has type: 'f0' | fun((atom()) -> pid()) | 'f1'
Context expected type: fun()
Because in the expression's type:
Here the type is a union type with some valid candidates: fun((atom()) -> pid())
However the following candidate: 'f0'
Differs from the expected type: fun()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:49:23
49 │ f_any_to_f0_neg(F) -> F.
│ ^ F.
Expression has type: fun()
Context expected type: 'f0'
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:61:31
61 │ when is_function(F, 1) -> F.
│ ^
│ │
│ F.
Expression has type: fun((term()) -> term())
Context expected type: f2()
Because in the expression's type:
Here the type is: fun((term()) -> term())
Context expects type: fun((term(), term()) -> term())
------------------------------ Detailed message ------------------------------
fun((term()) -> term()) is not compatible with f2()
because
fun((term()) -> term()) is not compatible with fun((term(), term()) -> term())
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/any_fun_type.erl:64:1
64 │ a_to_a(a) -> a.
│ ^^^^^^^^^^^^^^ Clause is not covered by spec
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:92:24
92 │ f5_to_f4_cov_neg(F) -> F.
│ ^
│ │
│ F.
Expression has type: f5('a' | 'b')
Context expected type: f4('a')
Because in the expression's type:
fun((term()) ->
Here the type is a union type with some valid candidates: 'a'
However the following candidate: 'b'
Differs from the expected type: 'a'
)
------------------------------ Detailed message ------------------------------
f5('a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with fun((...) -> 'a')
because
'a' | 'b' is not compatible with 'a'
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:98:20
98 │ apply_f4_neg(F) -> F(a).
│ ^^^^ F('a').
Expression has type: number()
Context expected type: boolean()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/any_fun_type.erl:113:22
113 │ fun3_to_f4_neg(F) -> F.
│ ^
│ │
│ F.
Expression has type: fun((term()) -> 'a' | 'b')
Context expected type: f4('a')
Because in the expression's type:
fun((term()) ->
Here the type is a union type with some valid candidates: 'a'
However the following candidate: 'b'
Differs from the expected type: 'a'
)
------------------------------ Detailed message ------------------------------
fun((term()) -> 'a' | 'b') is not compatible with f4('a')
because
fun((term()) -> 'a' | 'b') is not compatible with fun((...) -> 'a')
because
'a' | 'b' is not compatible with 'a'
because
'b' is not compatible with 'a'
7 ERRORS

View file

@ -0,0 +1,24 @@
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/apply_none.erl:26:1
26 │ ╭ apply_none1(F)
27 │ │ when is_function(F, 1),
28 │ │ is_function(F, 2) ->
29 │ │ F(a).
│ ╰────────^ Clause is not covered by spec
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/apply_none.erl:32:1
32 │ apply_none2(F) -> F(ok).
│ ^^^^^^^^^^^^^^^^^^^^^^^ Clause is not covered by spec
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/apply_none.erl:35:1
35 │ ╭ apply_none3(F) ->
36 │ │ Res = F(ok),
37 │ │ Res.
│ ╰───────^ Clause is not covered by spec
3 ERRORS

View file

@ -0,0 +1,24 @@
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/apply_none.erl:26:1
26 │ ╭ apply_none1(F)
27 │ │ when is_function(F, 1),
28 │ │ is_function(F, 2) ->
29 │ │ F(a).
│ ╰────────^ Clause is not covered by spec
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/apply_none.erl:32:1
32 │ apply_none2(F) -> F(ok).
│ ^^^^^^^^^^^^^^^^^^^^^^^ Clause is not covered by spec
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/apply_none.erl:35:1
35 │ ╭ apply_none3(F) ->
36 │ │ Res = F(ok),
37 │ │ Res.
│ ╰───────^ Clause is not covered by spec
3 ERRORS

View file

@ -60,6 +60,12 @@ Because in the expression's type:
However the following candidate: string()
Differs from the expected type: 'anything'
------------------------------ Detailed message ------------------------------
string() | dynamic() is not compatible with 'anything'
because
string() is not compatible with 'anything'
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/approx.erl:74:1

View file

@ -0,0 +1,105 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/approx.erl:36:5
36 │ {X, X}.
│ ^^^^^^ {X, X}.
Expression has type: {dynamic(), dynamic()}
Context expected type: 'ok'
error: unknown_id (See https://fb.me/eqwalizer_errors#unknown_id)
┌─ check/src/approx.erl:39:1
39 │ generics_with_unions:type_var_from_nowhere().
│ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Unknown id: generics_with_unions:type_var_from_nowhere/0
error: reference_to_invalid_type (See https://fb.me/eqwalizer_errors#reference_to_invalid_type)
┌─ check/src/approx.erl:41:1
41 │ ╭ -spec test_trans_invalid1_neg(
42 │ │ trans_invalid()
43 │ │ ) -> nok.
│ ╰────────^ test_trans_invalid1_neg/1 references type with invalid definition: trans_invalid/0
error: reference_to_invalid_type (See https://fb.me/eqwalizer_errors#reference_to_invalid_type)
┌─ check/src/approx.erl:47:1
47 │ ╭ -spec test_trans_invalid2_neg(
48 │ │ trans_invalid()
49 │ │ ) -> nok.
│ ╰────────^ test_trans_invalid2_neg/1 references type with invalid definition: trans_invalid/0
error: reference_to_invalid_type (See https://fb.me/eqwalizer_errors#reference_to_invalid_type)
┌─ check/src/approx.erl:53:1
53 │ ╭ -spec test_trans_invalid3_neg(
54 │ │ trans_invalid()
55 │ │ ) -> nok.
│ ╰────────^ test_trans_invalid3_neg/1 references type with invalid definition: trans_invalid/0
error: expected_fun_type (See https://fb.me/eqwalizer_errors#expected_fun_type)
┌─ check/src/approx.erl:62:30
62 │ test_opaque_as_fun_neg(X) -> X(ok).
│ ^ X.
Expected fun type with arity 1
Got: misc:o() | fun((term()) -> none())
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/approx.erl:69:3
69 │ Dyn2.
│ ^^^^
│ │
│ Dyn2.
Expression has type: string() | dynamic()
Context expected type: 'anything'
Because in the expression's type:
Here the type is a union type with some valid candidates: dynamic()
However the following candidate: string()
Differs from the expected type: 'anything'
------------------------------ Detailed message ------------------------------
string() | dynamic() is not compatible with 'anything'
because
string() is not compatible with 'anything'
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/approx.erl:74:1
74 │ ╭ dyn_union_1(N, D) ->
75 │ │ Res = case D of
76 │ │ 1 -> N;
77 │ │ 2 -> D
78 │ │ end,
79 │ │ eqwalizer:reveal_type(Res),
80 │ │ Res.
│ ╰─────^ Clause is not covered by spec
error: reveal_type (See https://fb.me/eqwalizer_errors#reveal_type)
┌─ check/src/approx.erl:79:25
79 │ eqwalizer:reveal_type(Res),
│ ^^^ dynamic(number())
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/approx.erl:85:1
85 │ ╭ dyn_union_2(N, D) ->
86 │ │ Res = case D of
87 │ │ 2 -> D;
88 │ │ 1 -> N
89 │ │ end,
90 │ │ eqwalizer:reveal_type(Res),
91 │ │ Res.
│ ╰─────^ Clause is not covered by spec
error: reveal_type (See https://fb.me/eqwalizer_errors#reveal_type)
┌─ check/src/approx.erl:90:25
90 │ eqwalizer:reveal_type(Res),
│ ^^^ dynamic(number())
11 ERRORS

View file

@ -0,0 +1,105 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/approx.erl:36:5
36 │ {X, X}.
│ ^^^^^^ {X, X}.
Expression has type: {dynamic(), dynamic()}
Context expected type: 'ok'
error: unknown_id (See https://fb.me/eqwalizer_errors#unknown_id)
┌─ check/src/approx.erl:39:1
39 │ generics_with_unions:type_var_from_nowhere().
│ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Unknown id: generics_with_unions:type_var_from_nowhere/0
error: reference_to_invalid_type (See https://fb.me/eqwalizer_errors#reference_to_invalid_type)
┌─ check/src/approx.erl:41:1
41 │ ╭ -spec test_trans_invalid1_neg(
42 │ │ trans_invalid()
43 │ │ ) -> nok.
│ ╰────────^ test_trans_invalid1_neg/1 references type with invalid definition: trans_invalid/0
error: reference_to_invalid_type (See https://fb.me/eqwalizer_errors#reference_to_invalid_type)
┌─ check/src/approx.erl:47:1
47 │ ╭ -spec test_trans_invalid2_neg(
48 │ │ trans_invalid()
49 │ │ ) -> nok.
│ ╰────────^ test_trans_invalid2_neg/1 references type with invalid definition: trans_invalid/0
error: reference_to_invalid_type (See https://fb.me/eqwalizer_errors#reference_to_invalid_type)
┌─ check/src/approx.erl:53:1
53 │ ╭ -spec test_trans_invalid3_neg(
54 │ │ trans_invalid()
55 │ │ ) -> nok.
│ ╰────────^ test_trans_invalid3_neg/1 references type with invalid definition: trans_invalid/0
error: expected_fun_type (See https://fb.me/eqwalizer_errors#expected_fun_type)
┌─ check/src/approx.erl:62:30
62 │ test_opaque_as_fun_neg(X) -> X(ok).
│ ^ X.
Expected fun type with arity 1
Got: misc:o() | fun((term()) -> none())
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/approx.erl:69:3
69 │ Dyn2.
│ ^^^^
│ │
│ Dyn2.
Expression has type: string() | dynamic()
Context expected type: 'anything'
Because in the expression's type:
Here the type is a union type with some valid candidates: dynamic()
However the following candidate: string()
Differs from the expected type: 'anything'
------------------------------ Detailed message ------------------------------
string() | dynamic() is not compatible with 'anything'
because
string() is not compatible with 'anything'
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/approx.erl:74:1
74 │ ╭ dyn_union_1(N, D) ->
75 │ │ Res = case D of
76 │ │ 1 -> N;
77 │ │ 2 -> D
78 │ │ end,
79 │ │ eqwalizer:reveal_type(Res),
80 │ │ Res.
│ ╰─────^ Clause is not covered by spec
error: reveal_type (See https://fb.me/eqwalizer_errors#reveal_type)
┌─ check/src/approx.erl:79:25
79 │ eqwalizer:reveal_type(Res),
│ ^^^ dynamic(number())
error: clause_not_covered (See https://fb.me/eqwalizer_errors#clause_not_covered)
┌─ check/src/approx.erl:85:1
85 │ ╭ dyn_union_2(N, D) ->
86 │ │ Res = case D of
87 │ │ 2 -> D;
88 │ │ 1 -> N
89 │ │ end,
90 │ │ eqwalizer:reveal_type(Res),
91 │ │ Res.
│ ╰─────^ Clause is not covered by spec
error: reveal_type (See https://fb.me/eqwalizer_errors#reveal_type)
┌─ check/src/approx.erl:90:25
90 │ eqwalizer:reveal_type(Res),
│ ^^^ dynamic(number())
11 ERRORS

View file

@ -0,0 +1,17 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/as_pat.erl:51:38
51 │ unboxL_neg(BN = #box_n{}) -> unbox_a(BN).
│ ^^ BN.
Expression has type: #box_n{}
Context expected type: #box_a{}
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/as_pat.erl:61:38
61 │ unboxR_neg(#box_n{} = BN) -> unbox_b(BN).
│ ^^ BN.
Expression has type: #box_n{}
Context expected type: #box_b{}
2 ERRORS

View file

@ -0,0 +1,17 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/as_pat.erl:51:38
51 │ unboxL_neg(BN = #box_n{}) -> unbox_a(BN).
│ ^^ BN.
Expression has type: #box_n{}
Context expected type: #box_a{}
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/as_pat.erl:61:38
61 │ unboxR_neg(#box_n{} = BN) -> unbox_b(BN).
│ ^^ BN.
Expression has type: #box_n{}
Context expected type: #box_b{}
2 ERRORS

View file

@ -2,8 +2,15 @@ error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types
┌─ check/src/auto_imports.erl:22:20
22 │ erlang:error(ok, ok).
│ ^^ 'ok'.
│ ^^
│ │
│ 'ok'.
Expression has type: 'ok'
Context expected type: [term()] | 'none'
'ok' is not compatible with [term()] | 'none'
because
'ok' is not compatible with [term()]
1 ERROR

View file

@ -0,0 +1,16 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/auto_imports.erl:22:20
22 │ erlang:error(ok, ok).
│ ^^
│ │
│ 'ok'.
Expression has type: 'ok'
Context expected type: [term()] | 'none'
'ok' is not compatible with [term()] | 'none'
because
'ok' is not compatible with [term()]
1 ERROR

View file

@ -0,0 +1,16 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/auto_imports.erl:22:20
22 │ erlang:error(ok, ok).
│ ^^
│ │
│ 'ok'.
Expression has type: 'ok'
Context expected type: [term()] | 'none'
'ok' is not compatible with [term()] | 'none'
because
'ok' is not compatible with [term()]
1 ERROR

View file

@ -0,0 +1,13 @@
error: unknown_id (See https://fb.me/eqwalizer_errors#unknown_id)
┌─ check/src/behave.erl:8:20
8 │ -callback foo() -> behave1:test().
│ ^^^^^^^^^^^^^^ Unknown id: behave1:test/0
error: unbound_type_var (See https://fb.me/eqwalizer_errors#unbound_type_var)
┌─ check/src/behave.erl:10:1
10 │ -type invalid() :: _T.
│ ^^^^^^^^^^^^^^^^^^^^^ _T: Type variable is unbound.
2 ERRORS

View file

@ -0,0 +1,13 @@
error: unknown_id (See https://fb.me/eqwalizer_errors#unknown_id)
┌─ check/src/behave.erl:8:20
8 │ -callback foo() -> behave1:test().
│ ^^^^^^^^^^^^^^ Unknown id: behave1:test/0
error: unbound_type_var (See https://fb.me/eqwalizer_errors#unbound_type_var)
┌─ check/src/behave.erl:10:1
10 │ -type invalid() :: _T.
│ ^^^^^^^^^^^^^^^^^^^^^ _T: Type variable is unbound.
2 ERRORS

View file

@ -0,0 +1,65 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:24:17
24 │ test04_neg() -> <<>>.
│ ^^^^ <<..>>.
Expression has type: binary()
Context expected type: [term()]
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:32:20
32 │ test05_neg(A) -> <<A/signed>>.
│ ^ A.
Expression has type: atom()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:35:23
35 │ test06_neg(A, S) -> <<A:S>>.
│ ^ A.
Expression has type: atom()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:38:18
38 │ test07_neg(A) -> [A].
│ ^^^ [A].
Expression has type: [atom()]
Context expected type: binary()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:55:7
55 │ <<"binary"/binary>>.
│ ^^^^^^^^ string_lit.
Expression has type: string()
Context expected type: binary()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:74:9
74 │ <<X:A>> = Bits,
│ ^ A.
Expression has type: atom()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:80:9
80 │ <<X:(self())>> = Bits,
│ ^^^^^^^^ erlang:self().
Expression has type: pid()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:89:7
89 │ <<[]>>.
│ ^^ [].
Expression has type: []
Context expected type: number()
8 ERRORS

View file

@ -0,0 +1,65 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:24:17
24 │ test04_neg() -> <<>>.
│ ^^^^ <<..>>.
Expression has type: binary()
Context expected type: [term()]
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:32:20
32 │ test05_neg(A) -> <<A/signed>>.
│ ^ A.
Expression has type: atom()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:35:23
35 │ test06_neg(A, S) -> <<A:S>>.
│ ^ A.
Expression has type: atom()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:38:18
38 │ test07_neg(A) -> [A].
│ ^^^ [A].
Expression has type: [atom()]
Context expected type: binary()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:55:7
55 │ <<"binary"/binary>>.
│ ^^^^^^^^ string_lit.
Expression has type: string()
Context expected type: binary()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:74:9
74 │ <<X:A>> = Bits,
│ ^ A.
Expression has type: atom()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:80:9
80 │ <<X:(self())>> = Bits,
│ ^^^^^^^^ erlang:self().
Expression has type: pid()
Context expected type: number()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/binaries.erl:89:7
89 │ <<[]>>.
│ ^^ [].
Expression has type: []
Context expected type: number()
8 ERRORS

View file

@ -22,4 +22,10 @@ Because in the expression's type:
However the following candidate: 'false'
Differs from the expected type: 'true'
------------------------------ Detailed message ------------------------------
'false' | 'true' is not compatible with 'true'
because
'false' is not compatible with 'true'
2 ERRORS

View file

@ -0,0 +1,31 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/booleans.erl:47:3
47 │ 1 andalso b().
│ ^ 1.
Expression has type: number()
Context expected type: boolean()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/booleans.erl:59:3
59 │ dyn() andalso only_true().
│ ^^^^^^^^^^^^^^^^^^^^^^^^^
│ │
│ _ andalso _.
Expression has type: 'false' | 'true'
Context expected type: 'true'
Because in the expression's type:
Here the type is a union type with some valid candidates: 'true'
However the following candidate: 'false'
Differs from the expected type: 'true'
------------------------------ Detailed message ------------------------------
'false' | 'true' is not compatible with 'true'
because
'false' is not compatible with 'true'
2 ERRORS

View file

@ -0,0 +1,31 @@
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/booleans.erl:47:3
47 │ 1 andalso b().
│ ^ 1.
Expression has type: number()
Context expected type: boolean()
error: incompatible_types (See https://fb.me/eqwalizer_errors#incompatible_types)
┌─ check/src/booleans.erl:59:3
59 │ dyn() andalso only_true().
│ ^^^^^^^^^^^^^^^^^^^^^^^^^
│ │
│ _ andalso _.
Expression has type: 'false' | 'true'
Context expected type: 'true'
Because in the expression's type:
Here the type is a union type with some valid candidates: 'true'
However the following candidate: 'false'
Differs from the expected type: 'true'
------------------------------ Detailed message ------------------------------
'false' | 'true' is not compatible with 'true'
because
'false' is not compatible with 'true'
2 ERRORS

View file

@ -0,0 +1,14 @@
error: incorrect_return_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_return_type_in_cb_implementation)
┌─ check/src/callbacks3_neg.erl:12:1
12 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^
│ │
│ Incorrect return type for implementation of gen_server:handle_cast/2. Expected: {'noreply', term()} | {'noreply', term(), timeout() | 'hibernate' | {'continue', term()}} | {'stop', term(), term()}, Got: 'wrong_ret'.
'wrong_ret' is not compatible with {'noreply', term()} | {'noreply', term(), timeout() | 'hibernate' | {'continue', term()}} | {'stop', term(), term()}
because
'wrong_ret' is not compatible with {'noreply', term()}
1 ERROR

View file

@ -0,0 +1,14 @@
error: incorrect_return_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_return_type_in_cb_implementation)
┌─ check/src/callbacks3_neg.erl:12:1
12 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^
│ │
│ Incorrect return type for implementation of gen_server:handle_cast/2. Expected: {'noreply', term()} | {'noreply', term(), timeout() | 'hibernate' | {'continue', term()}} | {'stop', term(), term()}, Got: 'wrong_ret'.
'wrong_ret' is not compatible with {'noreply', term()} | {'noreply', term(), timeout() | 'hibernate' | {'continue', term()}} | {'stop', term(), term()}
because
'wrong_ret' is not compatible with {'noreply', term()}
1 ERROR

View file

@ -0,0 +1,14 @@
error: incorrect_return_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_return_type_in_cb_implementation)
┌─ check/src/callbacks3_neg.erl:12:1
12 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^
│ │
│ Incorrect return type for implementation of gen_server:handle_cast/2. Expected: {'noreply', term()} | {'noreply', term(), gen_server:action()} | {'stop', term(), term()}, Got: 'wrong_ret'.
'wrong_ret' is not compatible with {'noreply', term()} | {'noreply', term(), gen_server:action()} | {'stop', term(), term()}
because
'wrong_ret' is not compatible with {'noreply', term()}
1 ERROR

View file

@ -1,27 +0,0 @@
error: incorrect_return_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_return_type_in_cb_implementation)
┌─ check/src/callbacks3_neg.erl:13:1
13 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^ Incorrect return type for implementation of gen_server:handle_cast/2.
Expected: {'noreply', term()} | {'noreply', term(), timeout() | 'hibernate' | {'continue', term()}} | {'stop', term(), term()}
Got: 'wrong_ret'
error: incorrect_return_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_return_type_in_cb_implementation)
┌─ check/src/callbacks3_neg.erl:13:1
13 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^
│ │
│ Incorrect return type for implementation of gen_server:handle_info/2.
Expected: {'noreply', term()} | {'noreply', term(), timeout() | 'hibernate' | {'continue', term()}} | {'stop', term(), term()}
Got: {'noreply', 'ok', 'wrong_atom'}
Because in the expression's type:
{ 'noreply', 'ok',
Here the type is: 'wrong_atom'
Context expects type: 'infinity' | number() | 'hibernate' | {'continue', term()}
No candidate matches in the expected union.
}
2 ERRORS

View file

@ -0,0 +1,7 @@
error: behaviour_does_not_exist (See https://fb.me/eqwalizer_errors#behaviour_does_not_exist)
┌─ check/src/callbacks4_neg.erl:9:1
9 │ -behaviour(nonexistent).
│ ^^^^^^^^^^^^^^^^^^^^^^^ Behaviour does not exist: nonexistent
1 ERROR

View file

@ -0,0 +1,7 @@
error: behaviour_does_not_exist (See https://fb.me/eqwalizer_errors#behaviour_does_not_exist)
┌─ check/src/callbacks4_neg.erl:9:1
9 │ -behaviour(nonexistent).
│ ^^^^^^^^^^^^^^^^^^^^^^^ Behaviour does not exist: nonexistent
1 ERROR

View file

@ -0,0 +1,7 @@
error: incorrect_param_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_param_type_in_cb_implementation)
┌─ check/src/callbacks5_neg.erl:13:1
13 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^ Parameter 1 in implementation of gen_server:format_status/2 has no overlap with expected parameter type. Expected: 'normal' | 'terminate', Got: 'bad'.
1 ERROR

View file

@ -0,0 +1,7 @@
error: incorrect_param_type_in_cb_implementation (See https://fb.me/eqwalizer_errors#incorrect_param_type_in_cb_implementation)
┌─ check/src/callbacks5_neg.erl:13:1
13 │ -behavior(gen_server).
│ ^^^^^^^^^^^^^^^^^^^^^ Parameter 1 in implementation of gen_server:format_status/2 has no overlap with expected parameter type. Expected: 'normal' | 'terminate', Got: 'bad'.
1 ERROR

Some files were not shown because too many files have changed in this diff Show more