Compare commits

...

159 commits
v0.0.3 ... main

Author SHA1 Message Date
kbwo
c53cdb6a6b chore: v0.1.12 2025-01-29 22:53:29 +09:00
kbwo
df261d6756 fix: avoid incremental syncing
Basically, this LSP server diagnose code when saved
2025-01-29 22:52:50 +09:00
kbwo
48af0e012c chore: v0.1.11 2025-01-04 19:14:37 +09:00
kbwo
b8c0aa330c docs: update README 2025-01-04 19:14:18 +09:00
kbwo
03c298280b chore: remove debug log 2025-01-04 19:12:40 +09:00
kbwo
969010845a docs: update README 2025-01-04 19:12:28 +09:00
kbwo
87d463c36e chore(adpater); v0.1.2 2024-12-25 23:35:49 +09:00
kbwo
7b56fbcbe4 feat: remove "\n" prefix in node test diagnostic message 2024-12-25 23:35:07 +09:00
kbwo
855d2c0094 feat(adapter): add path field for server v0.1.10 2024-12-25 23:34:31 +09:00
kbwo
a766099b76 chore: v0.1.10 2024-12-25 23:32:06 +09:00
kbwo
4df71f69c0 feat: add path field to TestItem for demand from adapter 2024-12-25 23:31:26 +09:00
kbwo
5b8862fe5d chore: v0.1.9 2024-12-24 18:24:52 +09:00
kbwo
833bfbc1b7 fix: prevent diagnostics to be cleared when :edit in vim 2024-12-24 18:24:37 +09:00
kbwo
42354d8ec5 refactor(server): fix for clippy 2024-12-24 17:27:42 +09:00
kbwo
5a672ddf71 chore(adapter): 0.1.1 2024-12-24 17:12:17 +09:00
kbwo
caa4463119 Merge branch 'fix/adapter-types' 2024-12-24 17:11:52 +09:00
kbwo
942f0b4ed1 fix(adapter): return valid json type to server 2024-12-24 17:11:06 +09:00
Kodai Kabasawa
91db91a229
Merge pull request #53 from kbwo/feat/update-adapter-spec
adapter: Update specification
2024-12-07 23:28:10 +09:00
kbwo
6c75bbbd47 chore(adapter): v0.1.0 2024-12-07 23:25:23 +09:00
kbwo
1df3076259 refactor: delete unused variable 2024-12-07 23:24:54 +09:00
kbwo
787e92fe87 feat(adapter): use new interface of detect-workspace 2024-12-07 23:24:28 +09:00
kbwo
df0631a6d6 feat(adapter): use new interface of discover 2024-12-07 23:24:23 +09:00
kbwo
2fa914973f feat(adapter): use new interface of run-file-test 2024-12-07 23:24:17 +09:00
kbwo
1fae0f701a chore(adapter): update adapter dependency 2024-12-07 23:24:03 +09:00
Kodai Kabasawa
cd5a15eace
Merge pull request #52 from kbwo/feat/update-server-spec
Update specification
2024-12-07 23:19:01 +09:00
kbwo
b28ba1aee0 chore: v0.1.8 2024-12-07 23:15:02 +09:00
kbwo
99cd28c540 feat: update interface of detect-workspace result 2024-12-07 23:11:18 +09:00
kbwo
bb4d9687df feat: update interface of discover result 2024-12-07 23:11:01 +09:00
kbwo
b205f9580d feat: update interface of run-file-test result 2024-12-07 23:10:30 +09:00
kbwo
d98623211d docs: update README 2024-12-07 17:28:06 +09:00
kbwo
c511c02107 chore: update error message 2024-12-02 19:39:23 +09:00
kbwo
aa4883e038 feat(adapter): add diagnostics to show which test failed 2024-11-30 18:22:53 +09:00
kbwo
e09a50d60c feat: change warning diagnostics to window/showMessage 2024-11-30 16:51:40 +09:00
kbwo
16074f3b4a chore(adapter): update version 2024-11-20 19:26:37 +09:00
kbwo
115cca2ecf feat(cargo-test): handle --nocapture option 2024-11-20 19:25:48 +09:00
kbwo
7563cd801c chore: update version 2024-11-20 19:24:21 +09:00
kbwo
c5fe76295d fix: handle errors correctly 2024-11-20 19:23:17 +09:00
kbwo
c61aaac03e chore: update version 2024-11-20 01:09:38 +09:00
Kodai Kabasawa
da18ce6586
Merge pull request #49 from kbwo/fix/handle-error
fix: handle unexpected request method correctly
2024-11-20 01:08:50 +09:00
kbwo
c939c3c5e0 fix: handle unexpected request method correctly 2024-11-20 01:07:27 +09:00
kbwo
533c5106e3 chore: update version 2024-11-19 23:17:15 +09:00
kbwo
aefd8437ff feat: diagnose workspace after initialization 2024-11-19 23:16:24 +09:00
kbwo
df735e9836 chore: update version 2024-11-19 19:47:08 +09:00
Kodai Kabasawa
55cbc584d8
Merge pull request #47 from kbwo/chore/sync
fix: prevent syncing text documents
2024-11-19 19:46:09 +09:00
kbwo
b87e2b9123 fix: prevent syncing text documents 2024-11-19 19:45:23 +09:00
kbwo
b8054f6594 chore: update version 2024-11-10 15:52:37 +09:00
Kodai Kabasawa
520a7e4c5f
Merge pull request #45 from kbwo/feat/toml-config
Configuration with external file
2024-11-10 15:50:51 +09:00
kbwo
33f44e5bad refactor: tiny fix 2024-11-10 15:46:53 +09:00
kbwo
6f1fc1f03a chore: update each settings for demo 2024-11-10 15:11:25 +09:00
kbwo
f20cb7f16b feat: workspace diagnostics even when no workspace/diagnostic request 2024-11-09 18:19:01 +09:00
kbwo
575f3ee661 feat: get configuration from toml file 2024-11-09 18:18:53 +09:00
kbwo
92dec010a8 chore: update version 2024-10-30 23:55:37 +09:00
kbwo
6dff68d23a doc: update README 2024-10-30 23:45:39 +09:00
Kodai Kabasawa
7da5b98850
Merge pull request #43 from kbwo/fix/data-structure
Change adapterCommand schema
2024-10-30 23:43:36 +09:00
kbwo
c6d839395d doc: update README 2024-10-30 23:37:00 +09:00
Kodai Kabasawa
27bfdd0245
Merge pull request #42 from kbwo/demo/helix
Helix configuration
2024-10-26 00:19:56 +09:00
kbwo
5211310b7f chore(demo): add example config of helix 2024-10-26 00:18:13 +09:00
kbwo
fa78f5faec chore(demo): update editor config according to previous schema change 2024-10-25 00:42:11 +09:00
kbwo
6c77af09c9 fix: change data structure of initialization params 2024-10-25 00:40:19 +09:00
kbwo
ee2acbb3a4 docs: update README 2024-10-19 17:14:16 +09:00
kbwo
1fc98e9063 chore: update version 2024-10-19 17:13:34 +09:00
kbwo
310263bea5 chore: add logs for troubleshooting 2024-10-19 17:12:33 +09:00
Kodai Kabasawa
0c1fd99098
Merge pull request #37 from kbwo/feat/node-test
adapter for Node Test Runner
2024-10-19 17:11:56 +09:00
kbwo
48c2388d95 chore(demo): update vscode settings 2024-10-19 16:57:56 +09:00
kbwo
cf926f56a2 fix(adapter): support nested namespace definition 2024-10-19 16:50:52 +09:00
kbwo
7ce2e97df6 fix(adapter): fix top level tests to not be in namespace 2024-10-19 13:41:09 +09:00
kbwo
8bfe0e9435 feat: implement adapter for node --test 2024-10-19 12:12:11 +09:00
kbwo
d275c28e88 chore(demo): add demo code of node --test 2024-10-18 00:04:40 +09:00
Kodai Kabasawa
86481a6d0a
Merge pull request #38 from sho-hata/fix-doc
docs: update adapter specification link in README
2024-10-17 08:51:58 +09:00
sho-hata
4bb5144e0c docs: update adapter specification link in README 2024-10-17 08:08:42 +09:00
kbwo
2bc6a5acf3 chore: update version 2024-10-14 21:47:16 +09:00
kbwo
570a755e50 chore(demo): update vscode settings 2024-10-14 21:46:50 +09:00
kbwo
46a9d12823 fix: handle shutdown correctly according to specification
Specification:
https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#shutdown
2024-10-14 21:45:50 +09:00
kbwo
aee202b9d1 chore: update versions 2024-10-14 00:01:24 +09:00
kbwo
54c3ca3e29 docs: update README 2024-10-14 00:00:56 +09:00
kbwo
3d01eef6b1 chore(adapter): add tracing logs 2024-10-14 00:00:34 +09:00
kbwo
7935caf4b3 chore: update specification 2024-10-13 23:41:51 +09:00
kbwo
dfd017c5d3 chore: update version 2024-10-12 17:33:29 +09:00
kbwo
b399d22656 fix: rename log file name 2024-10-12 17:32:59 +09:00
kbwo
f6aea92264 feat(adapter): delete old adapter log files 2024-10-12 17:30:49 +09:00
kbwo
f2e2116683 ci: enable to pass test failed in CI 2024-10-12 17:20:55 +09:00
kbwo
ab33121fc5 chore: update version 2024-10-12 17:18:07 +09:00
kbwo
96855b782c chore: change adapter config specification
simplify key names
2024-10-12 17:17:15 +09:00
kbwo
13757136ba feat: delete old server log files 2024-10-12 17:16:37 +09:00
kbwo
7b59f5ae74 ci: update release procedure 2024-10-10 00:35:52 +09:00
kbwo
4e9f1b5c35 chore(adapter): update version 2024-10-10 00:28:51 +09:00
kbwo
b71ea060ef chore: update version 2024-10-10 00:28:41 +09:00
kbwo
3d6b53ccdd docs: update README 2024-10-09 09:08:19 +09:00
kbwo
da2e469337 fix: refresh workspace cache when it is empty 2024-10-09 09:07:41 +09:00
kbwo
ded6f382ac docs: update README 2024-09-29 00:54:01 +09:00
kbwo
20ae3b3487 docs: add documentation for adapter 2024-09-29 00:53:43 +09:00
Kodai Kabasawa
403ba08c81
Merge pull request #35 from kbwo/feat/tmp-phpunit
phpunit adapter
2024-09-23 21:27:15 +09:00
Kodai Kabasawa
245fd10346
Merge pull request #34 from kbwo/feat/handle-shutdown-exit
feat: handle `shutdown` and `exit`
2024-09-23 17:57:23 +09:00
kbwo
8f4097aab3 chore: add vscode settings for debugging with vscode-tesitng-ls 2024-09-23 17:56:16 +09:00
kbwo
9bb28ea592 chore: add log 2024-09-23 17:56:16 +09:00
kbwo
3e3d4c5db1 feat: implement phpunit adapter 2024-09-23 17:56:13 +09:00
kbwo
73a179f90b feat: handle shutdown and exit
close #33
2024-09-23 11:18:15 +09:00
kbwo
daa8db434c chore(adapter): write both of stdout and stderr to log 2024-08-17 16:27:58 +09:00
kbwo
b08c350b78 chore: update version 2024-08-15 23:20:26 +09:00
kbwo
cce066cd2a feat: add method to run workspace test 2024-08-15 23:14:06 +09:00
kbwo
63e1c653d9 feat(adapter): write test result to file 2024-08-15 22:58:00 +09:00
kbwo
3131833238 fix: revert specification due to design flaw 2024-08-14 23:39:40 +09:00
kbwo
7a6d6e28b5 chore: update version 2024-08-13 22:14:21 +09:00
kbwo
a983a09a8e feat: update adapter specification to distinguish between successful and failed tests in extensions 2024-08-13 22:08:40 +09:00
kbwo
c856f69086 refactor: fix comment 2024-08-13 21:54:40 +09:00
kbwo
ef51778b60 test(adapter): prevent test failure 2024-08-13 21:47:55 +09:00
kbwo
48e397efd3 fix(adapter): prevent test name duplication 2024-08-13 21:44:53 +09:00
kbwo
a8c52dc6c4 refactor: rename a variable 2024-08-13 21:44:42 +09:00
kbwo
2ba8d3d225 fix: use valid field in payload 2024-08-13 17:13:39 +09:00
kbwo
6104ca0438 refactor(client): fix typo 2024-08-13 16:48:34 +09:00
kbwo
d081843580 refactor: add some comments 2024-08-13 16:32:05 +09:00
kbwo
319e3b69d8 fix:(adapter): change format of test item id and name 2024-08-07 00:20:57 +09:00
kbwo
56c97a4780 chore(adapter): update version 2024-08-05 22:26:35 +09:00
kbwo
ad00f2c764 chore: update version 2024-08-05 21:53:36 +09:00
kbwo
f7e4ffac6f fix: prevent accessing to a non-existent field 2024-08-05 21:50:59 +09:00
kbwo
9c3b8719a2 fix: enable workspace diagnostics by default 2024-08-05 21:50:39 +09:00
kbwo
0f73fa2abd fix(adapter): format test item name 2024-08-04 22:21:16 +09:00
kbwo
7c5e365167 chore: update demo 2024-08-04 22:14:05 +09:00
kbwo
d3f3528c53 fix: detect namespace correctly
fix #31
2024-08-04 22:13:52 +09:00
kbwo
f1a10ceb28 chore: update version 2024-08-04 18:57:48 +09:00
Kodai Kabasawa
1447e9e085
Merge pull request #29 from kbwo/feat/config-workspace
Configuration to disable workspace diagnostics #28
2024-08-04 18:51:43 +09:00
kbwo
10f3453cad chore: update coc-settings 2024-08-04 18:29:57 +09:00
kbwo
8939558793 feat: add configuration to disable workspace diagnostics
close #28
2024-08-04 18:28:33 +09:00
kbwo
90be4e498f chore(adapter): change log file location 2024-08-04 17:33:19 +09:00
kbwo
65e95494bd refactor: move some functions for main loop to main.rs 2024-08-04 17:33:15 +09:00
kbwo
8ecf8891f8 feat: consolidate duplicate code into functions 2024-08-04 16:49:02 +09:00
kbwo
a1a0afc585 refactor: Reduced the memory size of structure field 2024-08-04 16:21:13 +09:00
kbwo
4e1b802485 refactor(demo): remove yarn related files 2024-08-04 15:46:31 +09:00
kbwo
22401774e1 refactor: remove some .clone() 2024-08-04 15:46:12 +09:00
kbwo
c3d3ff2c67 refactor: rename method 2024-07-22 23:35:34 +09:00
kbwo
d8bd53d4ca refactor(adapter): unify repetitive code patterns 2024-07-22 23:33:22 +09:00
Kodai Kabasawa
bb1a163f98
Merge pull request #26 from kbwo/feat/deno
adapter for deno
2024-07-21 16:02:33 +09:00
kbwo
6cb4fcbd92 feat(adapter): implement adapter for deno 2024-07-21 15:58:55 +09:00
Kodai Kabasawa
70c0fd4a2d
Merge pull request #20 from kbwo/feat/cargo-nextest
feat(adapter): add adapter for cargo-nextest
2024-07-16 00:31:13 +09:00
kbwo
b887d653a2 feat(adapter): add adapter for cargo-nextest
close #18
2024-07-16 00:23:19 +09:00
Kodai Kabasawa
3603e32025
Merge pull request #19 from kbwo/feat/vitest
adapter for vitest
2024-07-15 22:49:05 +09:00
kbwo
f78cf7f888 test(adapter): fix failed test 2024-07-15 19:14:27 +09:00
kbwo
1d6dd6623e refactor: rename directory 2024-07-15 19:13:19 +09:00
kbwo
4782784122 refactor(adapter): set diagnostic severity explicitly 2024-07-15 19:10:02 +09:00
kbwo
5ac623bb69 feat(adapter): add vitest adapter 2024-07-15 19:09:56 +09:00
kbwo
3f88f9b1b6 add vitest demo 2024-07-15 19:07:44 +09:00
kbwo
0ae8988cc5 add feature to configure workspace directory
close #16
2024-07-14 00:19:13 +09:00
kbwo
1b74a7ad4f add adapter log 2024-07-06 15:54:30 +09:00
kbwo
60e352f24f list project files correctly 2024-07-06 15:54:02 +09:00
kbwo
f9c054ebd3 organize modules 2024-07-06 11:20:41 +09:00
kbwo
b86e08e588 update README 2024-06-30 22:42:07 +09:00
Kodai Kabasawa
1ba9181a30
Merge pull request #15 from kbwo/license
add license
2024-06-30 22:40:50 +09:00
kbwo
caf868f9bd add license 2024-06-30 22:39:44 +09:00
Kodai Kabasawa
e32ba5d0c2
Merge pull request #14 from kbwo/ci
fix a bug in nested workspace and add CI
2024-06-30 22:32:46 +09:00
kbwo
51738a9a45 add GHA workflow for CI 2024-06-30 22:30:17 +09:00
kbwo
5f3759ec28 fix a bug in nested workspace 2024-06-30 22:14:56 +09:00
Kodai Kabasawa
b8b04b59ab
Merge pull request #12 from kbwo/feat/go-adapter
adapter for `go test`
2024-06-30 21:42:16 +09:00
kbwo
ac35284f68 update coc-settings.json for demo 2024-06-30 21:25:04 +09:00
kbwo
07b516e798 add go test feature to adapter 2024-06-30 18:20:27 +09:00
kbwo
80ee356a06 add example of go project 2024-06-30 18:19:51 +09:00
kbwo
582589af60 test only the file for file level test 2024-06-24 00:34:04 +09:00
Kodai Kabasawa
146607d132 Merge pull request #8 from kbwo/fix/new-file
refresh workspace cache if needed, fix #7
2024-06-24 00:33:22 +09:00
kbwo
c4a472d5e0 refresh workspace cache if needed, fix #7 2024-06-23 20:00:36 +09:00
kbwo
67e20e5b4d update adapter dependency 2024-06-22 21:06:55 +09:00
89 changed files with 7459 additions and 1063 deletions

30
.github/workflows/ci.yml vendored Normal file
View file

@ -0,0 +1,30 @@
name: Rust CI
on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
env:
CARGO_TERM_COLOR: always
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
- name: Build (required step)
run: cargo build --workspace
- name: Run tests
run: cargo test --verbose --workspace -- --nocapture

View file

@ -1,31 +1,60 @@
name: Release
name: Release Adapter
on:
push:
tags:
- 'adapter-v*.*.*'
branches:
- main
paths:
- crates/adapter/Cargo.toml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
check-version:
runs-on: ubuntu-latest
outputs:
version_changed: ${{ steps.check_version.outputs.version_changed }}
new_version: ${{ steps.check_version.outputs.new_version }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Check if version changed
id: check_version
run: |
PACKAGE_NAME=$(grep '^name' crates/adapter/Cargo.toml | sed 's/name = "\(.*\)"/\1/')
RELEASED_VERSION=$(cargo search $PACKAGE_NAME --limit 1 | grep -oP '(?<=").*(?=")')
if [ $? -ne 0 ]; then
echo "Failed to fetch released version"
exit 1
fi
NEW_VERSION=$(grep '^version' crates/adapter/Cargo.toml | sed 's/version = "\(.*\)"/\1/')
if [ "$RELEASED_VERSION" != "$NEW_VERSION" ]; then
echo "Version changed from $RELEASED_VERSION to $NEW_VERSION"
echo "version_changed=true" >> $GITHUB_OUTPUT
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
else
echo "No version change"
fi
publish:
needs: check-version
if: needs.check-version.outputs.version_changed == 'true'
runs-on: ubuntu-latest
defaults:
run:
working-directory: crates/adapter
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Build project
run: cargo build --release
- name: Publish to crates.io
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: cargo publish --token $CARGO_REGISTRY_TOKEN
- uses: actions/checkout@v3
- name: Set up Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Publish to crates.io
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: cargo publish --token $CARGO_REGISTRY_TOKEN

View file

@ -1,39 +1,70 @@
name: Release
name: Auto Release
on:
push:
tags:
- 'v*.*.*'
branches:
- main
paths:
- Cargo.toml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
check-version:
runs-on: ubuntu-latest
outputs:
version_changed: ${{ steps.check_version.outputs.version_changed }}
new_version: ${{ steps.check_version.outputs.new_version }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Check if version changed
id: check_version
run: |
PACKAGE_NAME=$(grep '^name' Cargo.toml | sed 's/name = "\(.*\)"/\1/')
RELEASED_VERSION=$(cargo search $PACKAGE_NAME --limit 1 | grep -oP '(?<=").*(?=")')
if [ $? -ne 0 ]; then
echo "Failed to fetch released version"
exit 1
fi
NEW_VERSION=$(grep '^version' Cargo.toml | sed 's/version = "\(.*\)"/\1/')
if [ "$RELEASED_VERSION" != "$NEW_VERSION" ]; then
echo "Version changed from $RELEASED_VERSION to $NEW_VERSION"
echo "version_changed=true" >> $GITHUB_OUTPUT
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
else
echo "No version change"
fi
- name: Set up Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
create-release:
needs: check-version
if: needs.check-version.outputs.version_changed == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Create Release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release create v${{ needs.check-version.outputs.new_version }} \
--title "Release ${{ needs.check-version.outputs.new_version }}" \
--generate-notes
- name: Build project
run: cargo build --release
# - name: Archive the build artifacts
# run: tar -czvf build.tar.gz -C target/release .
#
# - name: Create Release
# id: create_release
# uses: softprops/action-gh-release@v1
# with:
# tag_name: ${{ github.ref }}
# files: build.tar.gz
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Publish to crates.io
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: cargo publish --token $CARGO_REGISTRY_TOKEN
publish:
needs: [check-version, create-release]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- name: Publish to crates.io
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: cargo publish --token $CARGO_REGISTRY_TOKEN

15
.testingls.toml Normal file
View file

@ -0,0 +1,15 @@
enableWorkspaceDiagnostics = true
[adapterCommand.rust]
path = "testing-ls-adapter"
extra_arg = [
"--test-kind=cargo-test",
"--workspace"
]
include = [
"/**/*.rs"
]
exclude = [
"/demo/**/*"
]
workspace_dir = "."

6
.vim/coc-settings.json Normal file
View file

@ -0,0 +1,6 @@
{
"testing.enable": true,
"testing.enableWorkspaceDiagnostics": true,
"testing.server.path": "testing-language-server",
"testing.trace.server": "verbose"
}

9
CONTRIBUTING.md Normal file
View file

@ -0,0 +1,9 @@
# Getting started
```sh
cargo install just
cargo install cargo-watch
just watch-build
sudo ln -s $(pwd)/target/debug/testing-language-server /usr/local/bin/testing-language-server
sudo ln -s $(pwd)/target/debug/testing-ls-adapter /usr/local/bin/testing-ls-adapter
```

388
Cargo.lock generated
View file

@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
version = 4
[[package]]
name = "aho-corasick"
@ -11,6 +11,21 @@ dependencies = [
"memchr",
]
[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
"libc",
]
[[package]]
name = "anstream"
version = "0.6.13"
@ -65,6 +80,12 @@ version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "bitflags"
version = "1.3.2"
@ -78,10 +99,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "cc"
version = "1.0.96"
name = "bstr"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd"
checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706"
dependencies = [
"memchr",
"serde",
]
[[package]]
name = "bumpalo"
version = "3.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "cc"
version = "1.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "066fce287b1d4eafef758e89e09d724a24808a9196fe9756b8ca90e86d0719a2"
[[package]]
name = "cfg-if"
@ -89,6 +126,20 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"wasm-bindgen",
"windows-targets 0.52.5",
]
[[package]]
name = "clap"
version = "4.5.4"
@ -135,6 +186,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "crossbeam-channel"
version = "0.5.12"
@ -144,6 +201,25 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
@ -180,6 +256,12 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "errno"
version = "0.3.8"
@ -222,6 +304,36 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "globset"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1"
dependencies = [
"aho-corasick",
"bstr",
"log",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "globwalk"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757"
dependencies = [
"bitflags 2.5.0",
"ignore",
"walkdir",
]
[[package]]
name = "hashbrown"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3"
[[package]]
name = "heck"
version = "0.4.1"
@ -234,6 +346,29 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "iana-time-zone"
version = "0.1.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"wasm-bindgen",
"windows-core",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cc",
]
[[package]]
name = "idna"
version = "0.5.0"
@ -244,12 +379,47 @@ dependencies = [
"unicode-normalization",
]
[[package]]
name = "ignore"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b46810df39e66e925525d6e38ce1e7f6e1d208f72dc39757880fcb66e2c58af1"
dependencies = [
"crossbeam-deque",
"globset",
"log",
"memchr",
"regex-automata",
"same-file",
"walkdir",
"winapi-util",
]
[[package]]
name = "indexmap"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
dependencies = [
"equivalent",
"hashbrown",
]
[[package]]
name = "itoa"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "js-sys"
version = "0.3.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
@ -278,6 +448,12 @@ version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
[[package]]
name = "log"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "lsp-types"
version = "0.95.1"
@ -303,6 +479,15 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.19.0"
@ -416,6 +601,15 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.198"
@ -458,6 +652,15 @@ dependencies = [
"syn",
]
[[package]]
name = "serde_spanned"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
dependencies = [
"serde",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@ -520,14 +723,16 @@ dependencies = [
[[package]]
name = "testing-language-server"
version = "0.0.2"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cc86bccfcfd40400582bd0e9a970e0904e0fcfd3890ca935caaa868cf0a787c"
checksum = "70cedb2999008b364b1686c77a9e34531f1a31095f3177cfc11500d0ab5bd727"
dependencies = [
"anyhow",
"chrono",
"clap",
"dirs",
"glob",
"globwalk",
"lsp-types",
"once_cell",
"regex",
@ -535,19 +740,23 @@ dependencies = [
"serde_json",
"strum",
"thiserror",
"toml",
"tracing",
"tracing-appender",
"tracing-subscriber",
"tree-sitter-php",
]
[[package]]
name = "testing-language-server"
version = "0.0.3"
version = "0.1.12"
dependencies = [
"anyhow",
"chrono",
"clap",
"dirs",
"glob",
"globwalk",
"lsp-types",
"once_cell",
"regex",
@ -555,26 +764,35 @@ dependencies = [
"serde_json",
"strum",
"thiserror",
"toml",
"tracing",
"tracing-appender",
"tracing-subscriber",
"tree-sitter-php",
]
[[package]]
name = "testing-ls-adapter"
version = "0.0.1"
version = "0.1.2"
dependencies = [
"anyhow",
"clap",
"dirs",
"lsp-types",
"regex",
"serde",
"serde_json",
"tempfile",
"testing-language-server 0.0.2",
"testing-language-server 0.1.10",
"tracing",
"tracing-appender",
"tracing-subscriber",
"tree-sitter",
"tree-sitter-go",
"tree-sitter-javascript",
"tree-sitter-php",
"tree-sitter-rust",
"xml-rs",
]
[[package]]
@ -653,6 +871,40 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "toml"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.22.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
name = "tracing"
version = "0.1.40"
@ -717,6 +969,16 @@ dependencies = [
"regex",
]
[[package]]
name = "tree-sitter-go"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cb318be5ccf75f44e054acf6898a5c95d59b53443eed578e16be0cd7ec037f"
dependencies = [
"cc",
"tree-sitter",
]
[[package]]
name = "tree-sitter-javascript"
version = "0.21.0"
@ -727,6 +989,16 @@ dependencies = [
"tree-sitter",
]
[[package]]
name = "tree-sitter-php"
version = "0.22.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1be890bd043986cc26b69968698e508dbd40060805e482f226dc873a63a88d60"
dependencies = [
"cc",
"tree-sitter",
]
[[package]]
name = "tree-sitter-rust"
version = "0.21.2"
@ -776,12 +1048,95 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
dependencies = [
"cfg-if",
"once_cell",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
[[package]]
name = "winapi-util"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "windows-core"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
"windows-targets 0.52.5",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
@ -920,3 +1275,18 @@ name = "windows_x86_64_msvc"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
[[package]]
name = "winnow"
version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b"
dependencies = [
"memchr",
]
[[package]]
name = "xml-rs"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601"

View file

@ -1,14 +1,13 @@
[package]
name = "testing-language-server"
version = "0.0.3"
version = "0.1.12"
edition = "2021"
author = "Kodai Kabasawa <kabaaa1126@gmail.com>"
description = "LSP server for testing"
license = "MIT"
[workspace]
members = [ "crates/adapter"]
exclude = ["test_proj"]
exclude = ["demo"]
[[bin]]
name = "testing-language-server"
@ -46,3 +45,7 @@ clap = { workspace = true }
once_cell = { workspace = true }
strum = { workspace = true, features = ["derive"] }
glob = { workspace = true }
globwalk = "0.9.1"
tree-sitter-php = "0.22.8"
chrono = "0.4.38"
toml = "0.8.19"

21
LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 Kodai Kabasawa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

143
README.md
View file

@ -1,76 +1,125 @@
# testing-language-server
⚠️ **IMPORTANT NOTICE**
This project is under active development and may introduce breaking changes. If you encounter any issues, please make sure to update to the latest version before reporting bugs.
General-purpose LSP server that integrates with testing.
The language server is characterized by portability and extensibility.
## Motivation
This LSP server is heavily influenced by the following tools
- [neotest](https://github.com/nvim-neotest/neotest)
- [Wallaby.js](https://wallabyjs.com)
These tools are very useful and powerful. However, they depend on the execution environment, such as VSCode and NeoVim, and the portability aspect was inconvenient for me.
So, I designed this testing-language-server and its dedicated adapters for each test tool to be the middle layer to the parts that depend on each editor.
These tools are very useful and powerful. However, they depend on the execution environment, such as VSCode and Neovim, and the portability aspect was inconvenient for me.
So, I designed this testing-language-server and its dedicated adapters for each test tool to be the middle layer to the parts that depend on each editor.
This design makes it easy to view diagnostics from tests in any editor. Environment-dependent features like neotest and VSCode's built-in testing tools can also be achieved with minimal code using testing-language-server.
## Installation
```sh
cargo install testing-language-server
cargo install testing-ls-adapter
```
## Features
- [x] Realtime testing diagnostics
- [x] [VSCode extension](https://github.com/kbwo/vscode-testing-ls)
- [x] [coc.nvim extension](https://github.com/kbwo/coc-testing-ls)
- [x] For Neovim builtin LSP, see [testing-ls.nvim](https://github.com/kbwo/testing-ls.nvim)
- [ ] More efficient checking of diagnostics
- [ ] Adapter installation command
- [ ] VSCode extension
- [ ] Coc.nvim extension
- [ ] NeoVim builtin LSP plugin
- [ ] Useful commands in each extension
## Configuration
language server config:
### Required settings for all editors
You need to prepare .testingls.toml. See [this](./demo/.testingls.toml) for an example of the configuration.
```.testingls.toml
enableWorkspaceDiagnostics = true
[adapterCommand.cargo-test]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=cargo-test"]
include = ["/**/src/**/*.rs"]
exclude = ["/**/target/**"]
[adapterCommand.cargo-nextest]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=cargo-nextest"]
include = ["/**/src/**/*.rs"]
exclude = ["/**/target/**"]
[adapterCommand.jest]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=jest"]
include = ["/jest/*.js"]
exclude = ["/jest/**/node_modules/**/*"]
[adapterCommand.vitest]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=vitest"]
include = ["/vitest/*.test.ts", "/vitest/config/**/*.test.ts"]
exclude = ["/vitest/**/node_modules/**/*"]
[adapterCommand.deno]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=deno"]
include = ["/deno/*.ts"]
exclude = []
[adapterCommand.go]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=go-test"]
include = ["/**/*.go"]
exclude = []
[adapterCommand.node-test]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=node-test"]
include = ["/node-test/*.test.js"]
exclude = []
[adapterCommand.phpunit]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=phpunit"]
include = ["/**/*Test.php"]
exclude = ["/phpunit/vendor/**/*.php"]
```
"languageserver": {
"testing": {
"command": "<server path>/testing-language-server",
"trace.server": "verbose",
"filetypes": [
"rust",
"javascript"
],
"initializationOptions": {
"initializationOptions": {
"adapterCommand": {
"cargo test": [
{
"path": "<adapter path>/testing-ls-adapter",
"extra_args": ["--test-kind=cargo-test"],
"include_pattern": ["**/*.rs"],
"exclude_pattern": ["**/target/**"]
}
],
"jest": [
{
"path": "<adapter path>/testing-ls-adapter",
"extra_args": ["--test-kind=jest"],
"include_patterns": ["/**/*.js"],
"exclude_patterns": ["/node_modules/**/*"]
}
]
}
}
}
}
}
```
### VSCode
Install from [VSCode Marketplace](https://marketplace.visualstudio.com/items?itemName=kbwo.testing-language-server).
You can see the example in [settings.json](./demo/.vscode/settings.json).
### coc.nvim
Install from `:CocInstall coc-testing-ls`.
You can see an example in [coc-settings.json](./.vim/coc-settings.json).
### Neovim (nvim-lspconfig)
See [testing-ls.nvim](https://github.com/kbwo/testing-ls.nvim)
### Helix
See [language.toml](./demo/.helix/language.toml).
The array wrapper has been removed to simplify the configuration structure. Please update your settings accordingly.
## Adapter
- [x] cargo test
- [x] jest
- [ ] others
- [x] `cargo test`
- [x] `cargo nextest`
- [x] `jest`
- [x] `deno test`
- [x] `go test`
- [x] `phpunit`
- [x] `vitest`
- [x] `node --test` (Node Test Runner)
### Writing custom adapter
⚠ The specification of the adapter CLI is not yet stabilized.
See [spec.rs](./src/spec.rs).
[clap](https://docs.rs/clap) crate makes it easy to address specification, but in principle you can create an adapter in any way you like, regardless of the language you implement.
See [ADAPTER_SPEC.md](./doc/ADAPTER_SPEC.md) and [spec.rs](./src/spec.rs).

View file

@ -1,6 +1,6 @@
[package]
name = "testing-ls-adapter"
version = "0.0.1"
version = "0.1.2"
edition = "2021"
description = "testing-language-server adapter"
license = "MIT"
@ -8,7 +8,7 @@ license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
testing-language-server = "0.0.2"
testing-language-server = "0.1.10"
lsp-types = { workspace = true }
serde_json = { workspace = true }
serde = { workspace = true }
@ -19,3 +19,10 @@ tree-sitter-rust = "0.21.2"
anyhow = { workspace = true }
tempfile = "3.10.1"
tree-sitter-javascript = "0.21.0"
tree-sitter-go = "0.21.0"
tree-sitter-php = "0.22.5"
tracing-appender = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, default-features = false }
dirs = "5.0.1"
xml-rs = "0.8.21"

30
crates/adapter/src/log.rs Normal file
View file

@ -0,0 +1,30 @@
use std::path::PathBuf;
use testing_language_server::util::clean_old_logs;
use tracing_appender::non_blocking::WorkerGuard;
/// Handle for the adapter's file-based logging setup.
pub struct Log;

impl Log {
    /// Directory under the user's home where adapter log files are written.
    fn log_dir() -> PathBuf {
        dirs::home_dir()
            .unwrap()
            .join(".config/testing_language_server/adapter/logs")
    }

    /// Install the global tracing subscriber backed by a daily-rolling,
    /// non-blocking file appender, pruning log files older than 30 days.
    /// The returned guard must be kept alive for the life of the process so
    /// buffered log lines keep being flushed to disk.
    pub fn init() -> Result<WorkerGuard, anyhow::Error> {
        let prefix = "adapter.log";
        let dir = Self::log_dir();
        let appender = tracing_appender::rolling::daily(&dir, prefix);
        let (writer, guard) = tracing_appender::non_blocking(appender);
        // Remove stale rotated files matching "adapter.log.*" before the new
        // subscriber starts writing.
        clean_old_logs(
            dir.to_str().unwrap(),
            30,
            &format!("{prefix}.*"),
            &format!("{prefix}."),
        )
        .unwrap();
        tracing_subscriber::fmt().with_writer(writer).init();
        Ok(guard)
    }
}

View file

@ -1,6 +1,8 @@
use crate::model::AvailableTestKind;
use crate::model::Runner;
use anyhow::anyhow;
use clap::Parser;
use log::Log;
use std::io;
use std::io::Write;
use std::str::FromStr;
@ -9,18 +11,18 @@ use testing_language_server::spec::AdapterCommands;
use testing_language_server::spec::DetectWorkspaceArgs;
use testing_language_server::spec::DiscoverArgs;
use testing_language_server::spec::RunFileTestArgs;
pub mod log;
pub mod model;
pub mod runner;
fn pick_test_from_extra(
extra: &mut [String],
) -> Result<(Vec<String>, AvailableTestKind), anyhow::Error> {
// extraから--test-kind=のものを取り出し、元の配列から`--test-kind=`のものは除外する
let mut extra = extra.to_vec();
let index = extra
.iter()
.position(|arg| arg.starts_with("--test-kind="))
.unwrap();
.ok_or(anyhow!("test-kind is not found"))?;
let test_kind = extra.remove(index);
let language = test_kind.replace("--test-kind=", "");
@ -31,7 +33,7 @@ fn handle(commands: AdapterCommands) -> Result<(), LSError> {
match commands {
AdapterCommands::Discover(mut commands) => {
let (extra, test_kind) = pick_test_from_extra(&mut commands.extra).unwrap();
test_kind.disover(DiscoverArgs { extra, ..commands })?;
test_kind.discover(DiscoverArgs { extra, ..commands })?;
Ok(())
}
AdapterCommands::RunFileTest(mut commands) => {
@ -41,14 +43,16 @@ fn handle(commands: AdapterCommands) -> Result<(), LSError> {
}
AdapterCommands::DetectWorkspace(mut commands) => {
let (extra, test_kind) = pick_test_from_extra(&mut commands.extra)?;
test_kind.detect_workspaces_root(DetectWorkspaceArgs { extra, ..commands })?;
test_kind.detect_workspaces(DetectWorkspaceArgs { extra, ..commands })?;
Ok(())
}
}
}
fn main() {
let _guard = Log::init().expect("Failed to initialize logger");
let args = AdapterCommands::parse();
tracing::info!("adapter args={:#?}", args);
if let Err(error) = handle(args) {
io::stderr()
.write_all(format!("{:#?}", error).as_bytes())
@ -60,10 +64,8 @@ fn main() {
mod tests {
use super::*;
use crate::runner::cargo_test::CargoTestRunner;
use crate::runner::jest::JestRunner;
#[test]
// If `--test-kind=<value>` is not present, then return Err
fn error_test_kind_detection() {
let mut extra = vec![];
pick_test_from_extra(&mut extra).unwrap_err();
@ -72,22 +74,20 @@ mod tests {
}
#[test]
// If `--test-kind=<value>` is present, then return Ok(value)
fn test_kind_detection() {
fn single_test_kind_detection() {
let mut extra = vec!["--test-kind=cargo-test".to_string()];
let (_, language) = pick_test_from_extra(&mut extra).unwrap();
assert_eq!(language, AvailableTestKind::CargoTest(CargoTestRunner));
}
#[test]
// If multiple `--test-kind=<value>` are present, then return first one
fn error_multiple_test_kind_detection() {
fn multiple_test_kind_results_first_kind() {
let mut extra = vec![
"--test-kind=cargo-test".to_string(),
"--test-kind=jest".to_string(),
"--test-kind=foo".to_string(),
];
let (_, test_kind) = pick_test_from_extra(&mut extra).unwrap();
assert_eq!(test_kind, AvailableTestKind::Jest(JestRunner));
assert_eq!(test_kind, AvailableTestKind::CargoTest(CargoTestRunner));
}
}

View file

@ -1,4 +1,10 @@
use crate::runner::cargo_nextest::CargoNextestRunner;
use crate::runner::cargo_test::CargoTestRunner;
use crate::runner::deno::DenoRunner;
use crate::runner::go::GoTestRunner;
use crate::runner::node_test::NodeTestRunner;
use crate::runner::phpunit::PhpunitRunner;
use crate::runner::vitest::VitestRunner;
use std::str::FromStr;
use testing_language_server::error::LSError;
use testing_language_server::spec::DetectWorkspaceArgs;
@ -10,27 +16,51 @@ use crate::runner::jest::JestRunner;
#[derive(Debug, Eq, PartialEq)]
pub enum AvailableTestKind {
CargoTest(CargoTestRunner),
CargoNextest(CargoNextestRunner),
Jest(JestRunner),
Vitest(VitestRunner),
Deno(DenoRunner),
GoTest(GoTestRunner),
Phpunit(PhpunitRunner),
NodeTest(NodeTestRunner),
}
impl Runner for AvailableTestKind {
fn disover(&self, args: DiscoverArgs) -> Result<(), LSError> {
fn discover(&self, args: DiscoverArgs) -> Result<(), LSError> {
match self {
AvailableTestKind::CargoTest(runner) => runner.disover(args),
AvailableTestKind::Jest(runner) => runner.disover(args),
AvailableTestKind::CargoTest(runner) => runner.discover(args),
AvailableTestKind::CargoNextest(runner) => runner.discover(args),
AvailableTestKind::Jest(runner) => runner.discover(args),
AvailableTestKind::Deno(runner) => runner.discover(args),
AvailableTestKind::GoTest(runner) => runner.discover(args),
AvailableTestKind::Vitest(runner) => runner.discover(args),
AvailableTestKind::Phpunit(runner) => runner.discover(args),
AvailableTestKind::NodeTest(runner) => runner.discover(args),
}
}
fn run_file_test(&self, args: RunFileTestArgs) -> Result<(), LSError> {
match self {
AvailableTestKind::CargoTest(runner) => runner.run_file_test(args),
AvailableTestKind::CargoNextest(runner) => runner.run_file_test(args),
AvailableTestKind::Jest(runner) => runner.run_file_test(args),
AvailableTestKind::Deno(runner) => runner.run_file_test(args),
AvailableTestKind::GoTest(runner) => runner.run_file_test(args),
AvailableTestKind::Vitest(runner) => runner.run_file_test(args),
AvailableTestKind::Phpunit(runner) => runner.run_file_test(args),
AvailableTestKind::NodeTest(runner) => runner.run_file_test(args),
}
}
fn detect_workspaces_root(&self, args: DetectWorkspaceArgs) -> Result<(), LSError> {
fn detect_workspaces(&self, args: DetectWorkspaceArgs) -> Result<(), LSError> {
match self {
AvailableTestKind::CargoTest(runner) => runner.detect_workspaces_root(args),
AvailableTestKind::Jest(runner) => runner.detect_workspaces_root(args),
AvailableTestKind::CargoTest(runner) => runner.detect_workspaces(args),
AvailableTestKind::CargoNextest(runner) => runner.detect_workspaces(args),
AvailableTestKind::Jest(runner) => runner.detect_workspaces(args),
AvailableTestKind::Deno(runner) => runner.detect_workspaces(args),
AvailableTestKind::GoTest(runner) => runner.detect_workspaces(args),
AvailableTestKind::Vitest(runner) => runner.detect_workspaces(args),
AvailableTestKind::Phpunit(runner) => runner.detect_workspaces(args),
AvailableTestKind::NodeTest(runner) => runner.detect_workspaces(args),
}
}
}
@ -41,14 +71,20 @@ impl FromStr for AvailableTestKind {
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"cargo-test" => Ok(AvailableTestKind::CargoTest(CargoTestRunner)),
"cargo-nextest" => Ok(AvailableTestKind::CargoNextest(CargoNextestRunner)),
"jest" => Ok(AvailableTestKind::Jest(JestRunner)),
"go-test" => Ok(AvailableTestKind::GoTest(GoTestRunner)),
"vitest" => Ok(AvailableTestKind::Vitest(VitestRunner)),
"deno" => Ok(AvailableTestKind::Deno(DenoRunner)),
"phpunit" => Ok(AvailableTestKind::Phpunit(PhpunitRunner)),
"node-test" => Ok(AvailableTestKind::NodeTest(NodeTestRunner)),
_ => Err(anyhow::anyhow!("Unknown test kind: {}", s)),
}
}
}
pub trait Runner {
fn disover(&self, args: DiscoverArgs) -> Result<(), LSError>;
fn discover(&self, args: DiscoverArgs) -> Result<(), LSError>;
fn run_file_test(&self, args: RunFileTestArgs) -> Result<(), LSError>;
fn detect_workspaces_root(&self, args: DetectWorkspaceArgs) -> Result<(), LSError>;
fn detect_workspaces(&self, args: DetectWorkspaceArgs) -> Result<(), LSError>;
}

View file

@ -0,0 +1,224 @@
use crate::runner::util::send_stdout;
use std::path::PathBuf;
use std::process::Output;
use std::str::FromStr;
use testing_language_server::error::LSError;
use testing_language_server::spec::DetectWorkspaceResult;
use testing_language_server::spec::RunFileTestResult;
use testing_language_server::spec::DiscoverResult;
use testing_language_server::spec::FoundFileTests;
use testing_language_server::spec::TestItem;
use crate::model::Runner;
use super::util::detect_workspaces_from_file_list;
use super::util::discover_rust_tests;
use super::util::parse_cargo_diagnostics;
use super::util::write_result_log;
/// Group the given files by Cargo workspace: each file is associated with the
/// nearest ancestor directory containing a `Cargo.toml` manifest.
fn detect_workspaces(file_paths: &[String]) -> DetectWorkspaceResult {
    let workspace_markers = vec!["Cargo.toml".to_string()];
    detect_workspaces_from_file_list(file_paths, &workspace_markers)
}
/// Adapter runner that executes Rust tests through `cargo nextest`.
#[derive(Eq, PartialEq, Hash, Debug)]
pub struct CargoNextestRunner;
impl Runner for CargoNextestRunner {
    /// Discover test items in each given file (via tree-sitter parsing in
    /// `discover_rust_tests`) and print the JSON result to stdout.
    #[tracing::instrument(skip(self))]
    fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
        for file_path in file_paths {
            let tests = discover_rust_tests(&file_path)?;
            discover_results.data.push(FoundFileTests {
                tests,
                path: file_path,
            });
        }
        send_stdout(&discover_results)?;
        Ok(())
    }
    /// Run the tests discovered in `args.file_paths` with `cargo nextest`,
    /// parse the report into per-file diagnostics, and print them to stdout.
    #[tracing::instrument(skip(self))]
    fn run_file_test(
        &self,
        args: testing_language_server::spec::RunFileTestArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        // Re-discover tests so we can pass their ids as filters to nextest;
        // files that fail to parse are silently skipped (best-effort).
        let discovered_tests: Vec<TestItem> = file_paths
            .iter()
            .map(|path| discover_rust_tests(path))
            .filter_map(Result::ok)
            .flatten()
            .collect::<Vec<_>>();
        let test_ids = discovered_tests
            .iter()
            .map(|item| item.id.clone())
            .collect::<Vec<String>>();
        let workspace_root = args.workspace;
        let test_result = std::process::Command::new("cargo")
            .current_dir(&workspace_root)
            .arg("nextest")
            .arg("run")
            .arg("--workspace")
            .arg("--no-fail-fast")
            .args(args.extra)
            .arg("--")
            .args(&test_ids)
            .output()
            .unwrap();
        let output = test_result;
        write_result_log("cargo_nextest.log", &output)?;
        let Output {
            stdout,
            stderr,
            status,
        } = output;
        // Exit code 100 is treated as an expected outcome here (nextest uses
        // it to signal that tests failed). NOTE(review): confirm against
        // nextest's documented exit-code contract.
        let unexpected_status_code = status.code().map(|code| code != 100);
        // Only surface stderr as an adapter error when there is no stdout at
        // all AND the exit code is unexpected; otherwise stderr is the report
        // we want to parse.
        if stdout.is_empty() && !stderr.is_empty() && unexpected_status_code.unwrap_or(false) {
            return Err(LSError::Adapter(String::from_utf8(stderr).unwrap()));
        }
        // The human-readable nextest report is read from stderr.
        let test_result = String::from_utf8(stderr)?;
        let diagnostics: RunFileTestResult = parse_cargo_diagnostics(
            &test_result,
            PathBuf::from_str(&workspace_root).unwrap(),
            &file_paths,
            &discovered_tests,
        );
        send_stdout(&diagnostics)?;
        Ok(())
    }
    /// Map each file to its Cargo workspace root and print the JSON result
    /// to stdout.
    #[tracing::instrument(skip(self))]
    fn detect_workspaces(
        &self,
        args: testing_language_server::spec::DetectWorkspaceArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let detect_result = detect_workspaces(&file_paths);
        send_stdout(&detect_result)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use lsp_types::{Diagnostic, DiagnosticSeverity, Position, Range};
    use testing_language_server::spec::{FileDiagnostics, TestItem};
    use crate::runner::util::MAX_CHAR_LENGTH;
    use super::*;
    // Verifies that a captured cargo-nextest report is converted into one
    // diagnostic at the panic location, carrying the panic message and the
    // following context lines.
    #[test]
    fn parse_test_results() {
        // Raw cargo-nextest output for a single failing test (captured from a
        // real run; line wrapping is preserved as captured).
        let fixture = r#"
running 1 test
test rocks::dependency::tests::parse_dependency ... FAILED
failures:
Finished test [unoptimized + debuginfo] target(s) in 0.12s
Starting 1 test across 2 binaries (17 skipped)
FAIL [ 0.004s] rocks-lib rocks::dependency::tests::parse_dependency
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 17 filtered out; finis
hed in 0.00s
--- STDERR: rocks-lib rocks::dependency::tests::parse_dependency ---
thread 'rocks::dependency::tests::parse_dependency' panicked at rocks-lib/src/rocks/dependency.rs:86:64:
called `Result::unwrap()` on an `Err` value: unexpected end of input while parsing min or version number
Location:
rocks-lib/src/rocks/dependency.rs:62:22
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
"#;
        let file_paths =
            vec!["/home/example/projects/rocks-lib/src/rocks/dependency.rs".to_string()];
        // A discovered test item matching the failing test in the fixture;
        // the diagnostic range is expected to be anchored to this position.
        let test_items: Vec<TestItem> = vec![TestItem {
            id: "rocks::dependency::tests::parse_dependency".to_string(),
            name: "rocks::dependency::tests::parse_dependency".to_string(),
            path: "/home/example/projects/rocks-lib/src/rocks/dependency.rs".to_string(),
            start_position: Range {
                start: Position {
                    line: 85,
                    character: 63,
                },
                end: Position {
                    line: 85,
                    character: MAX_CHAR_LENGTH,
                },
            },
            end_position: Range {
                start: Position {
                    line: 85,
                    character: 63,
                },
                end: Position {
                    line: 85,
                    character: MAX_CHAR_LENGTH,
                },
            },
        }];
        let diagnostics: RunFileTestResult = parse_cargo_diagnostics(
            fixture,
            PathBuf::from_str("/home/example/projects").unwrap(),
            &file_paths,
            &test_items,
        );
        // Expected diagnostic message: the panic payload plus trailing
        // context lines from the report.
        let message = r#"called `Result::unwrap()` on an `Err` value: unexpected end of input while parsing min or version number
Location:
rocks-lib/src/rocks/dependency.rs:62:22
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
"#;
        assert_eq!(
            diagnostics,
            RunFileTestResult {
                data: vec![FileDiagnostics {
                    path: file_paths.first().unwrap().to_owned(),
                    diagnostics: vec![Diagnostic {
                        range: Range {
                            start: Position {
                                line: 85,
                                character: 63
                            },
                            end: Position {
                                line: 85,
                                character: MAX_CHAR_LENGTH
                            }
                        },
                        message: message.to_string(),
                        severity: Some(DiagnosticSeverity::ERROR),
                        ..Diagnostic::default()
                    }]
                }],
                messages: vec!()
            }
        )
    }
    // Smoke test: test discovery over the demo project's lib.rs must succeed.
    #[test]
    fn test_discover() {
        let file_path = "../../demo/rust/src/lib.rs";
        discover_rust_tests(file_path).unwrap();
    }
    // This crate's own sources and the demo project should resolve to two
    // distinct Cargo workspaces.
    #[test]
    fn test_detect_workspaces() {
        let current_dir = std::env::current_dir().unwrap();
        let librs = current_dir.join("src/lib.rs");
        let mainrs = current_dir.join("src/main.rs");
        let absolute_path_of_demo = current_dir.join("../../demo/rust");
        let demo_librs = absolute_path_of_demo.join("src/lib.rs");
        let file_paths: Vec<String> = [librs, mainrs, demo_librs]
            .iter()
            .map(|file_path| file_path.to_str().unwrap().to_string())
            .collect();
        let workspaces = detect_workspaces(&file_paths);
        assert_eq!(workspaces.data.len(), 2);
        assert!(workspaces
            .data
            .contains_key(absolute_path_of_demo.to_str().unwrap()));
        assert!(workspaces.data.contains_key(current_dir.to_str().unwrap()));
    }
}

View file

@ -1,281 +1,110 @@
use std::collections::HashMap;
use crate::runner::util::send_stdout;
use std::path::PathBuf;
use std::process::Output;
use std::str::FromStr;
use testing_language_server::error::LSError;
use testing_language_server::spec::DetectWorkspaceResult;
use testing_language_server::spec::RunFileTestResult;
use testing_language_server::spec::TestItem;
use tree_sitter::Point;
use tree_sitter::Query;
use tree_sitter::QueryCursor;
use lsp_types::{Diagnostic, Position, Range};
use regex::Regex;
use testing_language_server::spec::DiscoverResult;
use testing_language_server::spec::DiscoverResultItem;
use testing_language_server::spec::RunFileTestResultItem;
use testing_language_server::spec::FoundFileTests;
use testing_language_server::spec::TestItem;
use crate::model::Runner;
// If the character value is greater than the line length it defaults back to the line length.
const MAX_CHAR_LENGTH: u32 = 10000;
use super::util::detect_workspaces_from_file_list;
use super::util::discover_rust_tests;
use super::util::parse_cargo_diagnostics;
use super::util::write_result_log;
fn parse_diagnostics(
contents: &str,
workspace_root: PathBuf,
file_paths: &[String],
) -> RunFileTestResult {
let contents = contents.replace("\r\n", "\n");
let lines = contents.lines();
let mut result_map: HashMap<String, Vec<Diagnostic>> = HashMap::new();
for (i, line) in lines.clone().enumerate() {
let re = Regex::new(r"thread '([^']+)' panicked at ([^:]+):(\d+):(\d+):").unwrap();
if let Some(m) = re.captures(line) {
let mut message = String::new();
let file = m.get(2).unwrap().as_str().to_string();
if let Some(file_path) = file_paths
.iter()
.find(|path| path.contains(workspace_root.join(&file).to_str().unwrap()))
{
let lnum = m.get(3).unwrap().as_str().parse::<u32>().unwrap() - 1;
let col = m.get(4).unwrap().as_str().parse::<u32>().unwrap() - 1;
let mut next_i = i + 1;
while next_i < lines.clone().count()
&& !lines.clone().nth(next_i).unwrap().is_empty()
{
message = format!("{}{}\n", message, lines.clone().nth(next_i).unwrap());
next_i += 1;
}
let diagnostic = Diagnostic {
range: Range {
start: Position {
line: lnum,
character: col,
},
end: Position {
line: lnum,
character: MAX_CHAR_LENGTH,
},
},
message,
..Diagnostic::default()
};
result_map
.entry(file_path.to_string())
.or_default()
.push(diagnostic);
} else {
continue;
}
}
}
result_map
.into_iter()
.map(|(path, diagnostics)| RunFileTestResultItem { path, diagnostics })
.collect()
}
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
let mut parser = tree_sitter::Parser::new();
let mut test_items: Vec<TestItem> = vec![];
parser
.set_language(&tree_sitter_rust::language())
.expect("Error loading Rust grammar");
let source_code = std::fs::read_to_string(file_path)?;
let tree = parser.parse(&source_code, None).unwrap();
let query_string = r#"
(
(attribute_item
[
(attribute
(identifier) @macro_name
)
(attribute
[
(identifier) @macro_name
(scoped_identifier
name: (identifier) @macro_name
)
]
)
]
)
[
(attribute_item
(attribute
(identifier)
)
)
(line_comment)
]*
.
(function_item
name: (identifier) @test.name
) @test.definition
(#any-of? @macro_name "test" "rstest" "case")
)
(mod_item name: (identifier) @namespace.name)? @namespace.definition
"#;
let query =
Query::new(&tree_sitter_rust::language(), query_string).expect("Error creating query");
let mut cursor = QueryCursor::new();
cursor.set_byte_range(tree.root_node().byte_range());
let source = source_code.as_bytes();
let matches = cursor.matches(&query, tree.root_node(), source);
for m in matches {
let mut namespace_name = "";
let mut test_start_position = Point::default();
let mut test_end_position = Point::default();
for capture in m.captures {
let capture_name = query.capture_names()[capture.index as usize];
let value = capture.node.utf8_text(source)?;
let start_position = capture.node.start_position();
let end_position = capture.node.end_position();
match capture_name {
"namespace.name" => {
namespace_name = value;
}
"test.definition" => {
test_start_position = start_position;
test_end_position = end_position;
}
"test.name" => {
let test_name = value;
let test_item = TestItem {
id: format!("{}:{}", namespace_name, test_name),
name: test_name.to_string(),
start_position: Range {
start: Position {
line: test_start_position.row as u32,
character: test_start_position.column as u32,
},
end: Position {
line: test_start_position.row as u32,
character: MAX_CHAR_LENGTH,
},
},
end_position: Range {
start: Position {
line: test_end_position.row as u32,
character: 0,
},
end: Position {
line: test_end_position.row as u32,
character: test_end_position.column as u32,
},
},
};
test_items.push(test_item);
test_start_position = Point::default();
test_end_position = Point::default();
}
_ => {}
}
}
}
Ok(test_items)
}
fn detect_workspace_from_file(file_path: PathBuf) -> Option<String> {
let parent = file_path.parent();
if let Some(parent) = parent {
if parent.join("Cargo.toml").exists() {
return Some(parent.to_string_lossy().to_string());
} else {
detect_workspace_from_file(parent.to_path_buf())
}
} else {
None
}
}
fn detect_workspaces(file_paths: Vec<String>) -> Result<DetectWorkspaceResult, LSError> {
let mut result_map: HashMap<String, Vec<String>> = HashMap::new();
let mut file_paths = file_paths.clone();
file_paths.sort_by_key(|b| std::cmp::Reverse(b.len()));
for file_path in file_paths {
let existing_workspace = result_map
.iter()
.find(|(workspace_root, _)| file_path.contains(workspace_root.as_str()));
if let Some((workspace_root, _)) = existing_workspace {
result_map
.entry(workspace_root.to_string())
.or_default()
.push(file_path);
} else {
let workspace = detect_workspace_from_file(PathBuf::from_str(&file_path).unwrap());
if let Some(workspace) = workspace {
result_map.entry(workspace).or_default().push(file_path);
}
}
}
Ok(result_map)
fn detect_workspaces(file_paths: &[String]) -> DetectWorkspaceResult {
detect_workspaces_from_file_list(file_paths, &["Cargo.toml".to_string()])
}
#[derive(Eq, PartialEq, Hash, Debug)]
pub struct CargoTestRunner;
impl Runner for CargoTestRunner {
fn disover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
#[tracing::instrument(skip(self))]
fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
let file_paths = args.file_paths;
let mut discover_results: DiscoverResult = vec![];
let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
for file_path in file_paths {
let tests = discover(&file_path)?;
discover_results.push(DiscoverResultItem {
let tests = discover_rust_tests(&file_path)?;
discover_results.data.push(FoundFileTests {
tests,
path: file_path,
});
}
serde_json::to_writer(std::io::stdout(), &discover_results)?;
send_stdout(&discover_results)?;
Ok(())
}
#[tracing::instrument(skip(self))]
fn run_file_test(
&self,
args: testing_language_server::spec::RunFileTestArgs,
) -> Result<(), LSError> {
let file_paths = args.file_paths;
let workspace_root = args.workspace_root;
let discovered_tests: Vec<TestItem> = file_paths
.iter()
.map(|path| discover_rust_tests(path))
.filter_map(Result::ok)
.flatten()
.collect::<Vec<_>>();
let test_ids = discovered_tests
.iter()
.map(|item| item.id.clone())
.collect::<Vec<String>>();
let workspace_root = args.workspace;
let test_result = std::process::Command::new("cargo")
.current_dir(&workspace_root)
.arg("test")
.args(args.extra)
.arg("--")
.args(&test_ids)
.output()
.unwrap();
let Output { stdout, stderr, .. } = test_result;
if stdout.is_empty() && !stderr.is_empty() {
let output = test_result;
write_result_log("cargo_test.log", &output)?;
let Output { stdout, stderr, .. } = output;
if stdout.is_empty() {
return Err(LSError::Adapter(String::from_utf8(stderr).unwrap()));
}
let test_result = String::from_utf8(stdout)?;
let diagnostics: RunFileTestResult = parse_diagnostics(
// When `--nocapture` option is set, stderr has some important information
// to parse test result
let test_result = String::from_utf8(stderr)? + &String::from_utf8(stdout)?;
let diagnostics: RunFileTestResult = parse_cargo_diagnostics(
&test_result,
PathBuf::from_str(&workspace_root).unwrap(),
&file_paths,
&discovered_tests,
);
serde_json::to_writer(std::io::stdout(), &diagnostics)?;
send_stdout(&diagnostics)?;
Ok(())
}
fn detect_workspaces_root(
#[tracing::instrument(skip(self))]
fn detect_workspaces(
&self,
args: testing_language_server::spec::DetectWorkspaceArgs,
) -> Result<(), LSError> {
let file_paths = args.file_paths;
let detect_result = detect_workspaces(file_paths)?;
serde_json::to_writer(std::io::stdout(), &detect_result)?;
let detect_result = detect_workspaces(&file_paths);
send_stdout(&detect_result)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use lsp_types::{Diagnostic, DiagnosticSeverity, Position, Range};
use testing_language_server::spec::FileDiagnostics;
use crate::runner::util::MAX_CHAR_LENGTH;
use super::*;
#[test]
@ -299,10 +128,36 @@ note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
"#;
let file_paths =
vec!["/home/example/projects/rocks-lib/src/rocks/dependency.rs".to_string()];
let diagnostics: RunFileTestResult = parse_diagnostics(
let test_items: Vec<TestItem> = vec![TestItem {
id: "rocks::dependency::tests::parse_dependency".to_string(),
name: "rocks::dependency::tests::parse_dependency".to_string(),
path: "/home/example/projects/rocks-lib/src/rocks/dependency.rs".to_string(),
start_position: Range {
start: Position {
line: 85,
character: 63,
},
end: Position {
line: 85,
character: MAX_CHAR_LENGTH,
},
},
end_position: Range {
start: Position {
line: 85,
character: 63,
},
end: Position {
line: 85,
character: MAX_CHAR_LENGTH,
},
},
}];
let diagnostics: RunFileTestResult = parse_cargo_diagnostics(
fixture,
PathBuf::from_str("/home/example/projects").unwrap(),
&file_paths,
&test_items,
);
let message = r#"called `Result::unwrap()` on an `Err` value: unexpected end of input while parsing min or version number
Location:
@ -312,30 +167,34 @@ note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
assert_eq!(
diagnostics,
vec![RunFileTestResultItem {
path: file_paths.first().unwrap().to_owned(),
diagnostics: vec![Diagnostic {
range: Range {
start: Position {
line: 85,
character: 63
RunFileTestResult {
data: vec![FileDiagnostics {
path: file_paths.first().unwrap().to_owned(),
diagnostics: vec![Diagnostic {
range: Range {
start: Position {
line: 85,
character: 63
},
end: Position {
line: 85,
character: MAX_CHAR_LENGTH
}
},
end: Position {
line: 85,
character: MAX_CHAR_LENGTH
}
},
message: message.to_string(),
..Diagnostic::default()
}]
}]
message: message.to_string(),
severity: Some(DiagnosticSeverity::ERROR),
..Diagnostic::default()
}]
}],
messages: vec![]
}
)
}
#[test]
fn test_discover() {
let file_path = "../../test_proj/rust/src/lib.rs";
discover(file_path).unwrap();
let file_path = "../../demo/rust/src/lib.rs";
discover_rust_tests(file_path).unwrap();
}
#[test]
@ -343,16 +202,18 @@ note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
let current_dir = std::env::current_dir().unwrap();
let librs = current_dir.join("src/lib.rs");
let mainrs = current_dir.join("src/main.rs");
let absolute_path_of_test_proj = current_dir.join("../../test_proj/rust");
let test_proj_librs = absolute_path_of_test_proj.join("src/lib.rs");
let file_paths: Vec<String> = [librs, mainrs, test_proj_librs]
let absolute_path_of_demo = current_dir.join("../../demo/rust");
let demo_librs = absolute_path_of_demo.join("src/lib.rs");
let file_paths: Vec<String> = [librs, mainrs, demo_librs]
.iter()
.map(|file_path| file_path.to_str().unwrap().to_string())
.collect();
let workspaces = detect_workspaces(file_paths).unwrap();
assert_eq!(workspaces.len(), 2);
assert!(workspaces.contains_key(&absolute_path_of_test_proj.to_str().unwrap().to_string()));
assert!(workspaces.contains_key(&current_dir.to_str().unwrap().to_string()));
let workspaces = detect_workspaces(&file_paths);
assert_eq!(workspaces.data.len(), 2);
assert!(workspaces
.data
.contains_key(absolute_path_of_demo.to_str().unwrap()));
assert!(workspaces.data.contains_key(current_dir.to_str().unwrap()));
}
}

View file

@ -0,0 +1,341 @@
use crate::runner::util::resolve_path;
use crate::runner::util::send_stdout;
use lsp_types::Diagnostic;
use lsp_types::DiagnosticSeverity;
use lsp_types::Position;
use lsp_types::Range;
use regex::Regex;
use std::collections::HashMap;
use std::path::PathBuf;
use std::process::Output;
use std::str::FromStr;
use testing_language_server::error::LSError;
use testing_language_server::spec::DetectWorkspaceResult;
use testing_language_server::spec::DiscoverResult;
use testing_language_server::spec::FileDiagnostics;
use testing_language_server::spec::FoundFileTests;
use testing_language_server::spec::RunFileTestResult;
use testing_language_server::spec::TestItem;
use crate::model::Runner;
use super::util::clean_ansi;
use super::util::detect_workspaces_from_file_list;
use super::util::discover_with_treesitter;
use super::util::write_result_log;
use super::util::MAX_CHAR_LENGTH;
/// Extracts `(file, line, column)` from a deno failure-location line shaped
/// like `... => path/to/file.ts:12:3`; returns `None` for any other line.
fn get_position_from_output(line: &str) -> Option<(String, u32, u32)> {
    let location = Regex::new(r"=> (?P<file>.*):(?P<line>\d+):(?P<column>\d+)").unwrap();
    let caps = location.captures(line)?;
    let file = caps.name("file")?.as_str().to_string();
    let line_number: u32 = caps.name("line")?.as_str().parse().unwrap();
    let column: u32 = caps.name("column")?.as_str().parse().unwrap();
    Some((file, line_number, column))
}
/// Converts raw `deno test` output into per-file diagnostics.
///
/// Only text after the "ERRORS" marker is considered. Error locations appear
/// on lines matching `=> file:line:column`; the non-location lines in between
/// are accumulated as the diagnostic message.
fn parse_diagnostics(
    contents: &str,
    workspace_root: PathBuf,
    file_paths: &[String],
) -> Result<RunFileTestResult, LSError> {
    // Normalize CRLF and strip ANSI escape sequences so line/regex parsing
    // sees plain text.
    let contents = clean_ansi(&contents.replace("\r\n", "\n"));
    let lines = contents.lines();
    let mut result_map: HashMap<String, Vec<Diagnostic>> = HashMap::new();
    // Location of the previously seen `=>` line; its diagnostic is flushed
    // when the *next* location line is found.
    let mut file_name: Option<String> = None;
    let mut lnum: Option<u32> = None;
    let mut message = String::new();
    // Becomes true once the "ERRORS" section header has been seen.
    let mut error_exists = false;
    for line in lines {
        if line.contains("ERRORS") {
            error_exists = true;
        } else if !error_exists {
            // Skip everything before the ERRORS section.
            continue;
        }
        if let Some(position) = get_position_from_output(line) {
            if file_name.is_some() {
                // A new location line closes the previous error: emit a
                // whole-line diagnostic at the recorded line number.
                let diagnostic = Diagnostic {
                    range: Range {
                        start: Position {
                            line: lnum.unwrap(),
                            character: 1,
                        },
                        end: Position {
                            line: lnum.unwrap(),
                            character: MAX_CHAR_LENGTH,
                        },
                    },
                    message: message.clone(),
                    severity: Some(DiagnosticSeverity::ERROR),
                    ..Diagnostic::default()
                };
                // Reported paths are relative to the workspace root.
                let file_path = resolve_path(&workspace_root, file_name.as_ref().unwrap())
                    .to_str()
                    .unwrap()
                    .to_string();
                // Only surface diagnostics for files the caller asked about.
                if file_paths.contains(&file_path) {
                    result_map.entry(file_path).or_default().push(diagnostic);
                }
            }
            file_name = Some(position.0);
            lnum = Some(position.1);
            // NOTE(review): `message` is not reset here, so message text
            // accumulates across consecutive errors — confirm against real
            // `deno test` output that this is intended.
        } else {
            message += line;
        }
    }
    // NOTE(review): the location recorded for the final `=>` line is never
    // flushed after the loop ends — verify the deno output format guarantees
    // a trailing location line, otherwise the last diagnostic is dropped.
    Ok(RunFileTestResult {
        data: result_map
            .into_iter()
            .map(|(path, diagnostics)| FileDiagnostics { path, diagnostics })
            .collect(),
        messages: vec![],
    })
}
/// Groups `file_paths` into Deno workspaces; a workspace root is the nearest
/// ancestor directory containing a `deno.json`.
fn detect_workspaces(file_paths: Vec<String>) -> DetectWorkspaceResult {
    let marker_files = [String::from("deno.json")];
    detect_workspaces_from_file_list(&file_paths, &marker_files)
}
/// Finds Deno test definitions in `file_path` with a tree-sitter query over
/// the JavaScript grammar: `Deno.test(...)` in its several call shapes, plus
/// BDD-style `describe` (namespaces) and `it` (tests).
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
    // from https://github.com/MarkEmmons/neotest-deno/blob/7136b9342aeecb675c7c16a0bde327d7fcb00a1c/lua/neotest-deno/init.lua#L93
    // license: https://github.com/MarkEmmons/neotest-deno/blob/main/LICENSE
    let query = r#"
;; Deno.test
(call_expression
function: (member_expression) @func_name (#match? @func_name "^Deno.test$")
arguments: [
(arguments ((string) @test.name . (arrow_function)))
(arguments . (function_expression name: (identifier) @test.name))
(arguments . (object(pair
key: (property_identifier) @key (#match? @key "^name$")
value: (string) @test.name
)))
(arguments ((string) @test.name . (object) . (arrow_function)))
(arguments (object) . (function_expression name: (identifier) @test.name))
]
) @test.definition
;; BDD describe - nested
(call_expression
function: (identifier) @func_name (#match? @func_name "^describe$")
arguments: [
(arguments ((string) @namespace.name . (arrow_function)))
(arguments ((string) @namespace.name . (function_expression)))
]
) @namespace.definition
;; BDD describe - flat
(variable_declarator
name: (identifier) @namespace.id
value: (call_expression
function: (identifier) @func_name (#match? @func_name "^describe")
arguments: [
(arguments ((string) @namespace.name))
(arguments (object (pair
key: (property_identifier) @key (#match? @key "^name$")
value: (string) @namespace.name
)))
]
)
) @namespace.definition
;; BDD it
(call_expression
function: (identifier) @func_name (#match? @func_name "^it$")
arguments: [
(arguments ((string) @test.name . (arrow_function)))
(arguments ((string) @test.name . (function_expression)))
]
) @test.definition
"#;
    discover_with_treesitter(file_path, &tree_sitter_javascript::language(), query)
}
/// Adapter that shells out to the `deno` CLI; behavior lives in its
/// `Runner` impl below.
#[derive(Eq, PartialEq, Debug)]
pub struct DenoRunner;
impl Runner for DenoRunner {
    /// Discovers tests in each requested file and writes the aggregated JSON
    /// result to stdout for the language server to consume.
    #[tracing::instrument(skip(self))]
    fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
        for file_path in file_paths {
            discover_results.data.push(FoundFileTests {
                tests: discover(&file_path)?,
                path: file_path,
            })
        }
        send_stdout(&discover_results)?;
        Ok(())
    }

    /// Runs `deno test` over the given files and reports failures as
    /// per-file diagnostics on stdout.
    #[tracing::instrument(skip(self))]
    fn run_file_test(
        &self,
        args: testing_language_server::spec::RunFileTestArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let workspace = args.workspace;
        // --no-prompt keeps the run non-interactive (no permission prompts).
        let output = std::process::Command::new("deno")
            .current_dir(&workspace)
            .args(["test", "--no-prompt"])
            .args(&file_paths)
            .output()
            // Fix: a spawn failure (`deno` missing from PATH, bad workspace
            // dir) previously panicked via `.unwrap()`; report it instead.
            .map_err(|err| LSError::Adapter(format!("failed to spawn `deno test`: {err}")))?;
        write_result_log("deno.log", &output)?;
        let Output { stdout, stderr, .. } = output;
        if stdout.is_empty() {
            // No captured test output at all: forward stderr as the error.
            // Fix: lossy conversion so non-UTF-8 bytes cannot panic.
            return Err(LSError::Adapter(
                String::from_utf8_lossy(&stderr).into_owned(),
            ));
        }
        let test_result = String::from_utf8(stdout)?;
        let diagnostics: RunFileTestResult = parse_diagnostics(
            &test_result,
            // `FromStr` for `PathBuf` is infallible, so this unwrap is safe.
            PathBuf::from_str(&workspace).unwrap(),
            &file_paths,
        )?;
        send_stdout(&diagnostics)?;
        Ok(())
    }

    /// Maps each file to its nearest ancestor containing `deno.json`.
    #[tracing::instrument(skip(self))]
    fn detect_workspaces(
        &self,
        args: testing_language_server::spec::DetectWorkspaceArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let detect_result = detect_workspaces(file_paths);
        send_stdout(&detect_result)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use std::env::current_dir;

    use super::*;

    // Parses a recorded `deno test` output fixture; diagnostics should be
    // produced for exactly one file.
    #[test]
    fn test_parse_diagnostics() {
        let test_result = std::env::current_dir()
            .unwrap()
            .join("../../demo/deno/output.txt");
        let test_result = std::fs::read_to_string(test_result).unwrap();
        // NOTE(review): "dneo" looks like a typo for "deno", but it must match
        // the paths recorded in the fixture, so it is kept as-is.
        let workspace = PathBuf::from_str("/home/demo/test/dneo/").unwrap();
        let target_file_path = "/home/demo/test/dneo/main_test.ts";
        let diagnostics =
            parse_diagnostics(&test_result, workspace, &[target_file_path.to_string()]).unwrap();
        assert_eq!(diagnostics.data.len(), 1);
    }

    // The detected workspace root for a file under demo/deno is demo/deno
    // itself (nearest directory containing deno.json).
    #[test]
    fn test_detect_workspace() {
        let current_dir = std::env::current_dir().unwrap();
        let absolute_path_of_demo = current_dir.join("../../demo/deno");
        let test_file = absolute_path_of_demo.join("main.test.ts");
        let file_paths: Vec<String> = [test_file]
            .iter()
            .map(|file_path| file_path.to_str().unwrap().to_string())
            .collect();
        let detect_result = detect_workspaces(file_paths);
        assert_eq!(detect_result.data.len(), 1);
        detect_result.data.iter().for_each(|(workspace, _)| {
            assert_eq!(workspace, absolute_path_of_demo.to_str().unwrap());
        });
    }

    // Discovery over the demo file should find the three Deno.test cases with
    // their exact source ranges (0-based lines; 10000 = MAX_CHAR_LENGTH).
    #[test]
    fn test_discover() {
        let file_path = current_dir().unwrap().join("../../demo/deno/main_test.ts");
        let file_path = file_path.to_str().unwrap();
        let test_items = discover(file_path).unwrap();
        assert_eq!(test_items.len(), 3);
        assert_eq!(
            test_items,
            vec![
                TestItem {
                    id: String::from("addTest"),
                    name: String::from("addTest"),
                    path: file_path.to_string(),
                    start_position: Range {
                        start: Position {
                            line: 7,
                            character: 0
                        },
                        end: Position {
                            line: 7,
                            character: 10000
                        }
                    },
                    end_position: Range {
                        start: Position {
                            line: 9,
                            character: 0
                        },
                        end: Position {
                            line: 9,
                            character: 2
                        }
                    }
                },
                TestItem {
                    id: String::from("fail1"),
                    name: String::from("fail1"),
                    path: file_path.to_string(),
                    start_position: Range {
                        start: Position {
                            line: 11,
                            character: 0
                        },
                        end: Position {
                            line: 11,
                            character: 10000
                        }
                    },
                    end_position: Range {
                        start: Position {
                            line: 13,
                            character: 0
                        },
                        end: Position {
                            line: 13,
                            character: 2
                        }
                    }
                },
                TestItem {
                    id: String::from("fail2"),
                    name: String::from("fail2"),
                    path: file_path.to_string(),
                    start_position: Range {
                        start: Position {
                            line: 15,
                            character: 0
                        },
                        end: Position {
                            line: 15,
                            character: 10000
                        }
                    },
                    end_position: Range {
                        start: Position {
                            line: 17,
                            character: 0
                        },
                        end: Position {
                            line: 17,
                            character: 2
                        }
                    }
                }
            ]
        )
    }
}

View file

@ -0,0 +1,326 @@
use crate::model::Runner;
use crate::runner::util::send_stdout;
use anyhow::anyhow;
use lsp_types::Diagnostic;
use lsp_types::DiagnosticSeverity;
use lsp_types::Position;
use lsp_types::Range;
use regex::Regex;
use serde::Deserialize;
use std::collections::HashMap;
use std::path::PathBuf;
use std::process::Output;
use std::str::FromStr;
use testing_language_server::error::LSError;
use testing_language_server::spec::DiscoverResult;
use testing_language_server::spec::FileDiagnostics;
use testing_language_server::spec::FoundFileTests;
use testing_language_server::spec::RunFileTestResult;
use testing_language_server::spec::TestItem;
use super::util::detect_workspaces_from_file_list;
use super::util::discover_with_treesitter;
use super::util::write_result_log;
use super::util::MAX_CHAR_LENGTH;
/// Event kinds emitted by `go test -json` in the `Action` field; serialized
/// in camelCase ("start", "run", "output", "fail", "pass").
#[derive(Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
enum Action {
    Start,
    Run,
    Output,
    Fail,
    Pass,
}
/// One line of `go test -json` output; the JSON fields are PascalCase
/// ("Time", "Action", "Package", "Test", "Output").
#[allow(dead_code)]
#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
struct TestResultLine {
    time: String,
    action: Action,
    // May be absent for package-level events, hence Option.
    test: Option<String>,
    package: String,
    // Present only on events that carry output text.
    output: Option<String>,
}
/// Extracts `(file, zero-based line)` from a `go test` output line shaped
/// like `    foo_test.go:12: ...`; returns `None` for non-location lines.
fn get_position_from_output(output: &str) -> Option<(String, u32)> {
    let location = Regex::new(r"^\s{4}(.*_test\.go):(\d+):").unwrap();
    let captures = location.captures(output)?;
    let file_name = captures.get(1)?.as_str().to_string();
    // `go test` reports 1-based line numbers; diagnostics are 0-based.
    let line = captures.get(2)?.as_str().parse::<u32>().unwrap() - 1;
    Some((file_name, line))
}
/// Normalizes a `go test` output line by removing the space-indent prefix.
/// NOTE(review): the search literal renders as a single space here, but the
/// rendering may have collapsed a multi-space indent (`go test` indents
/// output with runs of four spaces) — confirm against the repository source.
fn get_log_from_output(output: &str) -> String {
    output.replace(" ", "")
}
/// Parses `go test -json` output (one JSON event per line) into per-file
/// diagnostics, keeping only files listed in `file_paths`.
fn parse_diagnostics(
    contents: &str,
    workspace_root: PathBuf,
    file_paths: &[String],
) -> Result<RunFileTestResult, LSError> {
    let contents = contents.replace("\r\n", "\n");
    let lines = contents.lines();
    let mut result_map: HashMap<String, Vec<Diagnostic>> = HashMap::new();
    // Location of the most recent failure line found in "output" events.
    let mut file_name: Option<String> = None;
    let mut lnum: Option<u32> = None;
    let mut message = String::new();
    let mut last_action: Option<Action> = None;
    for line in lines {
        // Every line must be a JSON event; a stray non-JSON line aborts the
        // whole parse with an adapter error.
        let value: TestResultLine = serde_json::from_str(line).map_err(|e| anyhow!("{:?}", e))?;
        match value.action {
            Action::Run => {
                // A new test starts: drop any carried-over location/message.
                file_name = None;
                message = String::new();
            }
            Action::Output => {
                // NOTE(review): assumes every "output" event carries an
                // Output field — confirm test2json guarantees this.
                let output = &value.output.unwrap();
                if let Some((detected_file_name, detected_lnum)) = get_position_from_output(output)
                {
                    // Start of a failure report: remember where it points.
                    file_name = Some(detected_file_name);
                    lnum = Some(detected_lnum);
                    message = String::new();
                } else {
                    message += &get_log_from_output(output);
                }
            }
            _ => {}
        }
        // Emit at most one diagnostic per run of identical actions: the code
        // below only runs when the action kind changes between lines.
        let current_action = value.action;
        let is_action_changed = last_action.as_ref() != Some(&current_action);
        if is_action_changed {
            last_action = Some(current_action);
        } else {
            continue;
        }
        if let (Some(detected_fn), Some(detected_lnum)) = (&file_name, lnum) {
            // Whole-line diagnostic at the detected failure position.
            let diagnostic = Diagnostic {
                range: Range {
                    start: Position {
                        line: detected_lnum,
                        character: 1,
                    },
                    end: Position {
                        line: detected_lnum,
                        character: MAX_CHAR_LENGTH,
                    },
                },
                message: message.clone(),
                severity: Some(DiagnosticSeverity::ERROR),
                ..Diagnostic::default()
            };
            // Failure positions are reported relative to the workspace root.
            let file_path = workspace_root
                .join(detected_fn)
                .to_str()
                .unwrap()
                .to_owned();
            if file_paths.contains(&file_path) {
                result_map.entry(file_path).or_default().push(diagnostic);
            }
            // Reset so the same failure is not reported twice.
            file_name = None;
            lnum = None;
        }
    }
    Ok(RunFileTestResult {
        data: result_map
            .into_iter()
            .map(|(path, diagnostics)| FileDiagnostics { path, diagnostics })
            .collect(),
        messages: vec![],
    })
}
/// Finds Go test definitions in `file_path` via a tree-sitter query over the
/// Go grammar: `TestXxx`/`ExampleXxx` functions and methods, `t.Run(...)`
/// subtests, and both list- and map-style table-driven tests.
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
    // from https://github.com/nvim-neotest/neotest-go/blob/92950ad7be2ca02a41abca5c6600ff6ffaf5b5d6/lua/neotest-go/init.lua#L54
    // license: https://github.com/nvim-neotest/neotest-go/blob/92950ad7be2ca02a41abca5c6600ff6ffaf5b5d6/README.md
    let query = r#"
;;query
((function_declaration
name: (identifier) @test.name)
(#match? @test.name "^(Test|Example)"))
@test.definition
(method_declaration
name: (field_identifier) @test.name
(#match? @test.name "^(Test|Example)")) @test.definition
(call_expression
function: (selector_expression
field: (field_identifier) @test.method)
(#match? @test.method "^Run$")
arguments: (argument_list . (interpreted_string_literal) @test.name))
@test.definition
;; query for list table tests
(block
(short_var_declaration
left: (expression_list
(identifier) @test.cases)
right: (expression_list
(composite_literal
(literal_value
(literal_element
(literal_value
(keyed_element
(literal_element
(identifier) @test.field.name)
(literal_element
(interpreted_string_literal) @test.name)))) @test.definition))))
(for_statement
(range_clause
left: (expression_list
(identifier) @test.case)
right: (identifier) @test.cases1
(#eq? @test.cases @test.cases1))
body: (block
(expression_statement
(call_expression
function: (selector_expression
field: (field_identifier) @test.method)
(#match? @test.method "^Run$")
arguments: (argument_list
(selector_expression
operand: (identifier) @test.case1
(#eq? @test.case @test.case1)
field: (field_identifier) @test.field.name1
(#eq? @test.field.name @test.field.name1))))))))
;; query for map table tests
(block
(short_var_declaration
left: (expression_list
(identifier) @test.cases)
right: (expression_list
(composite_literal
(literal_value
(keyed_element
(literal_element
(interpreted_string_literal) @test.name)
(literal_element
(literal_value) @test.definition))))))
(for_statement
(range_clause
left: (expression_list
((identifier) @test.key.name)
((identifier) @test.case))
right: (identifier) @test.cases1
(#eq? @test.cases @test.cases1))
body: (block
(expression_statement
(call_expression
function: (selector_expression
field: (field_identifier) @test.method)
(#match? @test.method "^Run$")
arguments: (argument_list
((identifier) @test.key.name1
(#eq? @test.key.name @test.key.name1))))))))
"#;
    discover_with_treesitter(file_path, &tree_sitter_go::language(), query)
}
/// Adapter that shells out to the `go` CLI (`go test -json`); behavior lives
/// in its `Runner` impl below.
#[derive(Eq, PartialEq, Hash, Debug)]
pub struct GoTestRunner;
impl Runner for GoTestRunner {
    /// Discovers tests in each requested file and writes the aggregated JSON
    /// result to stdout for the language server to consume.
    #[tracing::instrument(skip(self))]
    fn discover(
        &self,
        args: testing_language_server::spec::DiscoverArgs,
    ) -> Result<(), testing_language_server::error::LSError> {
        let file_paths = args.file_paths;
        let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
        for file_path in file_paths {
            let tests = discover(&file_path)?;
            discover_results.data.push(FoundFileTests {
                tests,
                path: file_path,
            });
        }
        send_stdout(&discover_results)?;
        Ok(())
    }

    /// Runs `go test` in the workspace and reports failures as per-file
    /// diagnostics on stdout. The whole package is run; diagnostics are then
    /// filtered down to `file_paths` by `parse_diagnostics`.
    #[tracing::instrument(skip(self))]
    fn run_file_test(
        &self,
        args: testing_language_server::spec::RunFileTestArgs,
    ) -> Result<(), testing_language_server::error::LSError> {
        let file_paths = args.file_paths;
        // -json gives machine-readable events; -count=1 disables test caching.
        // NOTE(review): the empty "" argument is forwarded to `go test`
        // verbatim — it looks like a placeholder for a package pattern;
        // confirm it is intentional before removing.
        let default_args = ["-v", "-json", "", "-count=1", "-timeout=60s"];
        let workspace = args.workspace;
        let output = std::process::Command::new("go")
            .current_dir(&workspace)
            .arg("test")
            .args(default_args)
            .args(args.extra)
            .output()
            // Fix: a spawn failure (`go` missing from PATH, bad workspace
            // dir) previously panicked via `.unwrap()`; report it instead.
            .map_err(|err| LSError::Adapter(format!("failed to spawn `go test`: {err}")))?;
        write_result_log("go.log", &output)?;
        let Output { stdout, stderr, .. } = output;
        if stdout.is_empty() && !stderr.is_empty() {
            // Fix: lossy conversion so non-UTF-8 stderr cannot panic.
            return Err(LSError::Adapter(
                String::from_utf8_lossy(&stderr).into_owned(),
            ));
        }
        let test_result = String::from_utf8(stdout)?;
        let diagnostics: RunFileTestResult = parse_diagnostics(
            &test_result,
            // `FromStr` for `PathBuf` is infallible, so this unwrap is safe.
            PathBuf::from_str(&workspace).unwrap(),
            &file_paths,
        )?;
        send_stdout(&diagnostics)?;
        Ok(())
    }

    /// Maps each file to its nearest ancestor containing `go.mod`.
    #[tracing::instrument(skip(self))]
    fn detect_workspaces(
        &self,
        args: testing_language_server::spec::DetectWorkspaceArgs,
    ) -> Result<(), testing_language_server::error::LSError> {
        send_stdout(&detect_workspaces_from_file_list(
            &args.file_paths,
            &["go.mod".to_string()],
        ))?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::runner::go::discover;
    use std::str::FromStr;
    use std::{fs::read_to_string, path::PathBuf};

    use crate::runner::go::parse_diagnostics;

    // Parses a recorded `go test -json` fixture and checks the position and
    // message of the first diagnostic for the target file.
    #[test]
    fn test_parse_diagnostics() {
        let current_dir = std::env::current_dir().unwrap();
        let test_file_path = current_dir.join("tests/go-test.txt");
        let contents = read_to_string(test_file_path).unwrap();
        let workspace = PathBuf::from_str("/home/demo/test/go/src/test").unwrap();
        let target_file_path = "/home/demo/test/go/src/test/cases_test.go";
        let result =
            parse_diagnostics(&contents, workspace, &[target_file_path.to_string()]).unwrap();
        let result = result.data.first().unwrap();
        assert_eq!(result.path, target_file_path);
        let diagnostic = result.diagnostics.first().unwrap();
        // Line 30 is 0-based: the fixture reports cases_test.go:31.
        assert_eq!(diagnostic.range.start.line, 30);
        assert_eq!(diagnostic.range.start.character, 1);
        assert_eq!(diagnostic.range.end.line, 30);
        assert_eq!(diagnostic.message, "\tError Trace:\tcases_test.go:31\n\tError: \tNot equal: \n\t \texpected: 7\n\t \tactual : -1\n\tTest: \tTestSubtract/test_two\n--- FAIL: TestSubtract (0.00s)\n --- FAIL: TestSubtract/test_one (0.00s)\n");
    }

    // Tree-sitter discovery should find at least one test in the demo file.
    #[test]
    fn test_discover() {
        let file_path = "../../demo/go/cases_test.go";
        let test_items = discover(file_path).unwrap();
        assert!(!test_items.is_empty());
    }
}

View file

@ -1,34 +1,25 @@
use crate::runner::util::send_stdout;
use lsp_types::Diagnostic;
use lsp_types::Position;
use lsp_types::Range;
use regex::Regex;
use lsp_types::DiagnosticSeverity;
use serde_json::Value;
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use std::str::FromStr;
use tempfile::tempdir;
use testing_language_server::error::LSError;
use testing_language_server::spec::DetectWorkspaceResult;
use testing_language_server::spec::DiscoverResult;
use testing_language_server::spec::DiscoverResultItem;
use testing_language_server::spec::FileDiagnostics;
use testing_language_server::spec::FoundFileTests;
use testing_language_server::spec::RunFileTestResult;
use testing_language_server::spec::RunFileTestResultItem;
use testing_language_server::spec::TestItem;
use tree_sitter::Point;
use tree_sitter::Query;
use tree_sitter::QueryCursor;
use crate::model::Runner;
// If the character value is greater than the line length it defaults back to the line length.
const MAX_CHAR_LENGTH: u32 = 10000;
/// Strips ANSI SGR/erase escape sequences (e.g. "\x1B[31m", "\x1B[K") from
/// `input` so test output can be parsed as plain text.
fn clean_ansi(input: &str) -> String {
    // Fix: the final character class was `[m|K]`, which also matched a
    // literal `|` (not a sequence terminator); `[mK]` matches only the real
    // terminators `m` (SGR) and `K` (erase-in-line).
    let re = Regex::new(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[mK]").unwrap();
    re.replace_all(input, "").to_string()
}
use super::util::clean_ansi;
use super::util::detect_workspaces_from_file_list;
use super::util::discover_with_treesitter;
use super::util::LOG_LOCATION;
use super::util::MAX_CHAR_LENGTH;
fn parse_diagnostics(
test_result: &str,
@ -66,6 +57,7 @@ fn parse_diagnostics(
},
},
message,
severity: Some(DiagnosticSeverity::ERROR),
..Diagnostic::default()
};
result_map
@ -75,60 +67,23 @@ fn parse_diagnostics(
})
}
}
Ok(result_map
.into_iter()
.map(|(path, diagnostics)| RunFileTestResultItem { path, diagnostics })
.collect())
Ok(RunFileTestResult {
data: result_map
.into_iter()
.map(|(path, diagnostics)| FileDiagnostics { path, diagnostics })
.collect(),
messages: vec![],
})
}
/// Walks up from `file_path` looking for the nearest ancestor directory that
/// contains a `package.json`; returns that directory as a string, or `None`
/// once the filesystem root is reached.
fn detect_workspace_from_file(file_path: PathBuf) -> Option<String> {
    let parent = file_path.parent()?;
    if parent.join("package.json").exists() {
        Some(parent.to_string_lossy().into_owned())
    } else {
        // No marker here: recurse one directory up.
        detect_workspace_from_file(parent.to_path_buf())
    }
}
fn detect_workspaces(file_paths: Vec<String>) -> Result<DetectWorkspaceResult, LSError> {
let mut result_map: HashMap<String, Vec<String>> = HashMap::new();
let mut file_paths: Vec<String> = file_paths
.into_iter()
.filter(|path| !path.contains("node_modules/"))
.collect();
file_paths.sort_by_key(|b| std::cmp::Reverse(b.len()));
for file_path in file_paths {
let existing_workspace = result_map
.iter()
.find(|(workspace_root, _)| file_path.contains(workspace_root.as_str()));
if let Some((workspace_root, _)) = existing_workspace {
result_map
.entry(workspace_root.to_string())
.or_default()
.push(file_path);
} else {
let workspace = detect_workspace_from_file(PathBuf::from_str(&file_path).unwrap());
if let Some(workspace) = workspace {
result_map.entry(workspace).or_default().push(file_path);
}
}
}
Ok(result_map)
fn detect_workspaces(file_paths: Vec<String>) -> DetectWorkspaceResult {
detect_workspaces_from_file_list(&file_paths, &["package.json".to_string()])
}
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
let mut parser = tree_sitter::Parser::new();
let mut test_items: Vec<TestItem> = vec![];
parser
.set_language(&tree_sitter_javascript::language())
.expect("Error loading JavaScript grammar");
let source_code = std::fs::read_to_string(file_path)?;
let tree = parser.parse(&source_code, None).unwrap();
let query_string = r#"
// from https://github.com/nvim-neotest/neotest-jest/blob/514fd4eae7da15fd409133086bb8e029b65ac43f/lua/neotest-jest/init.lua#L162
// license: https://github.com/nvim-neotest/neotest-jest/blob/514fd4eae7da15fd409133086bb8e029b65ac43f/LICENSE.md
let query = r#"
; -- Namespaces --
; Matches: `describe('context', () => {})`
((call_expression
@ -197,93 +152,35 @@ fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
arguments: (arguments (string (string_fragment) @test.name) [(arrow_function) (function_expression)])
)) @test.definition
"#;
let query = Query::new(&tree_sitter_javascript::language(), query_string)
.expect("Error creating query");
let mut cursor = QueryCursor::new();
cursor.set_byte_range(tree.root_node().byte_range());
let source = source_code.as_bytes();
let matches = cursor.matches(&query, tree.root_node(), source);
for m in matches {
let mut namespace_name = "";
let mut test_start_position = Point::default();
let mut test_end_position = Point::default();
for capture in m.captures {
let capture_name = query.capture_names()[capture.index as usize];
let value = capture.node.utf8_text(source)?;
let start_position = capture.node.start_position();
let end_position = capture.node.end_position();
match capture_name {
"namespace.name" => {
namespace_name = value;
}
"test.definition" => {
test_start_position = start_position;
test_end_position = end_position;
}
"test.name" => {
let test_name = value;
let test_item = TestItem {
id: format!("{}:{}", namespace_name, test_name),
name: test_name.to_string(),
start_position: Range {
start: Position {
line: test_start_position.row as u32,
character: test_start_position.column as u32,
},
end: Position {
line: test_start_position.row as u32,
character: MAX_CHAR_LENGTH,
},
},
end_position: Range {
start: Position {
line: test_end_position.row as u32,
character: 0,
},
end: Position {
line: test_end_position.row as u32,
character: test_end_position.column as u32,
},
},
};
test_items.push(test_item);
test_start_position = Point::default();
test_end_position = Point::default();
}
_ => {}
}
}
}
Ok(test_items)
discover_with_treesitter(file_path, &tree_sitter_javascript::language(), query)
}
#[derive(Eq, PartialEq, Debug)]
pub struct JestRunner;
impl Runner for JestRunner {
fn disover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
#[tracing::instrument(skip(self))]
fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
let file_paths = args.file_paths;
let mut discover_results: DiscoverResult = vec![];
let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
for file_path in file_paths {
discover_results.push(DiscoverResultItem {
discover_results.data.push(FoundFileTests {
tests: discover(&file_path)?,
path: file_path,
})
}
serde_json::to_writer(std::io::stdout(), &discover_results)?;
send_stdout(&discover_results)?;
Ok(())
}
#[tracing::instrument(skip(self))]
fn run_file_test(
&self,
args: testing_language_server::spec::RunFileTestArgs,
) -> Result<(), LSError> {
let file_paths = args.file_paths;
let workspace_root = args.workspace_root;
let tempdir = tempdir().unwrap();
let tempdir_path = tempdir.path();
let tempfile_path = tempdir_path.join("jest.json");
let workspace_root = args.workspace;
let log_path = LOG_LOCATION.join("jest.json");
std::process::Command::new("jest")
.current_dir(&workspace_root)
.args([
@ -293,23 +190,24 @@ impl Runner for JestRunner {
"--verbose",
"--json",
"--outputFile",
tempfile_path.to_str().unwrap(),
log_path.to_str().unwrap(),
])
.output()
.unwrap();
let test_result = fs::read_to_string(tempfile_path)?;
let test_result = fs::read_to_string(log_path)?;
let diagnostics: RunFileTestResult = parse_diagnostics(&test_result, file_paths)?;
serde_json::to_writer(std::io::stdout(), &diagnostics)?;
send_stdout(&diagnostics)?;
Ok(())
}
fn detect_workspaces_root(
#[tracing::instrument(skip(self))]
fn detect_workspaces(
&self,
args: testing_language_server::spec::DetectWorkspaceArgs,
) -> Result<(), LSError> {
let file_paths = args.file_paths;
let detect_result = detect_workspaces(file_paths)?;
serde_json::to_writer(std::io::stdout(), &detect_result)?;
let detect_result = detect_workspaces(file_paths);
send_stdout(&detect_result)?;
Ok(())
}
}
@ -324,62 +222,63 @@ mod tests {
fn test_parse_diagnostics() {
let test_result = std::env::current_dir()
.unwrap()
.join("../../test_proj/jest/output.json");
.join("../../demo/jest/output.json");
let test_result = std::fs::read_to_string(test_result).unwrap();
let diagnostics = parse_diagnostics(
&test_result,
vec![
"/absolute_path/test_proj/jest/index.spec.js".to_string(),
"/absolute_path/test_proj/jest/another.spec.js".to_string(),
"/absolute_path/demo/jest/index.spec.js".to_string(),
"/absolute_path/demo/jest/another.spec.js".to_string(),
],
)
.unwrap();
assert_eq!(diagnostics.len(), 2);
assert_eq!(diagnostics.data.len(), 2);
}
#[test]
fn test_detect_workspace() {
let current_dir = std::env::current_dir().unwrap();
let absolute_path_of_test_proj = current_dir.join("../../test_proj/jest");
let test_proj_indexjs = absolute_path_of_test_proj.join("index.spec.js");
let file_paths: Vec<String> = [test_proj_indexjs]
let absolute_path_of_demo = current_dir.join("../../demo/jest");
let demo_indexjs = absolute_path_of_demo.join("index.spec.js");
let file_paths: Vec<String> = [demo_indexjs]
.iter()
.map(|file_path| file_path.to_str().unwrap().to_string())
.collect();
let detect_result = detect_workspaces(file_paths).unwrap();
assert_eq!(detect_result.len(), 1);
detect_result.iter().for_each(|(workspace, _)| {
assert_eq!(workspace, absolute_path_of_test_proj.to_str().unwrap());
let detect_result = detect_workspaces(file_paths);
assert_eq!(detect_result.data.len(), 1);
detect_result.data.iter().for_each(|(workspace, _)| {
assert_eq!(workspace, absolute_path_of_demo.to_str().unwrap());
});
}
#[test]
fn test_discover() {
let file_path = "../../test_proj/jest/index.spec.js";
let file_path = "../../demo/jest/index.spec.js";
let test_items = discover(file_path).unwrap();
assert_eq!(test_items.len(), 1);
assert_eq!(
test_items,
vec![TestItem {
id: String::from(":fail"),
name: String::from("fail"),
id: String::from("index::fail"),
name: String::from("index::fail"),
path: file_path.to_string(),
start_position: Range {
start: Position {
line: 2,
line: 1,
character: 2
},
end: Position {
line: 2,
line: 1,
character: MAX_CHAR_LENGTH
}
},
end_position: Range {
start: Position {
line: 4,
line: 3,
character: 0
},
end: Position {
line: 4,
line: 3,
character: 4
}
}

View file

@ -1,2 +1,9 @@
pub mod cargo_nextest;
pub mod cargo_test;
pub mod node_test;
pub mod deno;
pub mod go;
pub mod jest;
pub mod phpunit;
pub mod util;
pub mod vitest;

View file

@ -0,0 +1,924 @@
use std::process::Output;
use regex::Regex;
use testing_language_server::{
error::LSError,
spec::{
DetectWorkspaceResult, DiscoverResult, FileDiagnostics, FoundFileTests, RunFileTestResult,
TestItem,
},
};
use xml::{reader::XmlEvent, ParserConfig};
use crate::model::Runner;
use super::util::{
detect_workspaces_from_file_list, discover_with_treesitter, send_stdout, write_result_log,
ResultFromXml,
};
/// Adapter [`Runner`] for the built-in Node.js test runner (`node --test`).
#[derive(Eq, PartialEq, Debug)]
pub struct NodeTestRunner;
/// Discovers `describe`/`test`/`it` blocks in a JavaScript file with
/// tree-sitter and returns them as [`TestItem`]s.
///
/// The query below is adapted from neotest-jest (see links); it captures
/// namespaces (`describe`) and tests (`test`/`it`) in their arrow-function,
/// function-expression, options-object, async and `(t, done)` callback forms.
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
    // from https://github.com/nvim-neotest/neotest-jest/blob/514fd4eae7da15fd409133086bb8e029b65ac43f/lua/neotest-jest/init.lua#L162
    // license: https://github.com/nvim-neotest/neotest-jest/blob/514fd4eae7da15fd409133086bb8e029b65ac43f/LICENSE.md
    let query = r#"
; -- Namespaces --
; Matches: `describe('context', () => {})`
((call_expression
function: (identifier) @func_name (#eq? @func_name "describe")
arguments: (arguments (string (string_fragment) @namespace.name) (arrow_function))
)) @namespace.definition
; Matches: `describe('context', function() {})`
((call_expression
function: (identifier) @func_name (#eq? @func_name "describe")
arguments: (arguments (string (string_fragment) @namespace.name) (function_expression))
)) @namespace.definition
; Matches: `describe.only('context', () => {})`
((call_expression
function: (member_expression
object: (identifier) @func_name (#any-of? @func_name "describe")
)
arguments: (arguments (string (string_fragment) @namespace.name) (arrow_function))
)) @namespace.definition
; Matches: `describe.only('context', function() {})`
((call_expression
function: (member_expression
object: (identifier) @func_name (#any-of? @func_name "describe")
)
arguments: (arguments (string (string_fragment) @namespace.name) (function_expression))
)) @namespace.definition
; -- Tests --
; Matches: `test("test name", (t) => {})` or `it("test name", (t) => {})`
((call_expression
function: (identifier) @func_name (#any-of? @func_name "test" "it")
arguments: (arguments (string (string_fragment) @test.name) [(arrow_function) (function_expression)])
)) @test.definition
; Matches: `test("test name", { skip: true }, (t) => {})`
((call_expression
function: (identifier) @func_name (#any-of? @func_name "test" "it")
arguments: (arguments
(string (string_fragment) @test.name)
(object)
[(arrow_function) (function_expression)]
)
)) @test.definition
; Matches: `test("test name", async (t) => {})`
((call_expression
function: (identifier) @func_name (#any-of? @func_name "test" "it")
arguments: (arguments
(string (string_fragment) @test.name)
(arrow_function (identifier) @async (#eq? @async "async"))
)
)) @test.definition
; Matches: `test("test name", (t, done) => {})`
((call_expression
function: (identifier) @func_name (#any-of? @func_name "test" "it")
arguments: (arguments
(string (string_fragment) @test.name)
[(arrow_function (formal_parameters (identifier) (identifier))) (function_expression)]
)
)) @test.definition
"#;
    discover_with_treesitter(file_path, &tree_sitter_javascript::language(), query)
}
// characters can be like
// \n[Error [ERR_TEST_FAILURE]: assert is not defined] {\n  failureType: 'testCodeFailure',\n  cause: ReferenceError [Error]: assert is not defined\n      at TestContext.<anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:6:3)\n      at Test.runInAsyncScope (node:async_hooks:203:9)\n      at Test.run (node:internal/test_runner/test:631:25)\n      at Test.start (node:internal/test_runner/test:542:17)\n      at startSubtest (node:internal/test_runner/harness:214:17),\n  code: 'ERR_TEST_FAILURE'\n}\n\t\t
/// Extracts a diagnostic location from the raw text of a JUnit `<failure>`
/// element emitted by `node --test`.
///
/// Scans `error_text` line by line for the first stack-trace location of the
/// form `(path:line:col)` whose path is listed in `target_file_paths` and
/// returns the message together with that location.
///
/// # Errors
/// Returns an error when no matching location is found or when the captured
/// line/column numbers do not fit in a `u32`.
fn get_result_from_characters(
    error_text: &str,
    target_file_paths: &[String],
) -> Result<ResultFromXml, anyhow::Error> {
    let re_path_line = Regex::new(r"\(([^:]+):(\d+):(\d+)\)").unwrap();
    for line in error_text.lines() {
        if let Some(caps) = re_path_line.captures(line) {
            let file_path = &caps[1];
            if !target_file_paths.contains(&file_path.to_string()) {
                continue;
            }
            return Ok(ResultFromXml {
                // The reporter usually emits a leading "\n"; strip it when
                // present instead of unwrapping so text without the prefix
                // cannot panic.
                message: error_text
                    .strip_prefix('\n')
                    .unwrap_or(error_text)
                    .to_string(),
                path: file_path.to_string(),
                line: caps[2].parse::<u32>()?,
                col: caps[3].parse::<u32>()?,
            });
        }
    }
    Err(anyhow::anyhow!("Failed to parse error from {}", error_text))
}
/// Parses the JUnit XML that `node --test --test-reporter junit` writes to
/// stdout, collecting one [`ResultFromXml`] per `<failure>` text node that
/// references a file in `target_file_paths`.
///
/// XML read errors are logged and stop the loop early, returning whatever was
/// collected so far; the function itself never returns `Err` in practice.
fn get_result_from_xml(
    output: &str,
    target_file_paths: &[String],
) -> Result<Vec<ResultFromXml>, anyhow::Error> {
    use xml::common::Position;
    let mut reader = ParserConfig::default()
        .ignore_root_level_whitespace(false)
        .create_reader(output.as_bytes());
    let local_name = "failure";
    let mut in_failure = false;
    let mut result: Vec<ResultFromXml> = Vec::new();
    loop {
        match reader.next() {
            Ok(e) => match e {
                XmlEvent::StartElement { name, .. } => {
                    if name.local_name.starts_with(local_name) {
                        in_failure = true;
                    }
                }
                XmlEvent::EndElement { .. } => {
                    // NOTE(review): this resets the flag on ANY closing tag,
                    // not just `</failure>`; works because the relevant text
                    // nodes appear directly inside `<failure>` — confirm if
                    // nested elements ever occur.
                    in_failure = false;
                }
                XmlEvent::Characters(data) => {
                    // Parse first, keep only if we are inside a <failure>.
                    if let Ok(result_from_xml) =
                        get_result_from_characters(&data, target_file_paths)
                    {
                        if in_failure {
                            result.push(result_from_xml);
                        }
                    }
                }
                XmlEvent::EndDocument => break,
                _ => {}
            },
            Err(e) => {
                tracing::error!("Error at {}: {e}", reader.position());
                break;
            }
        }
    }
    Ok(result)
}
impl Runner for NodeTestRunner {
    /// Discovers test items in each requested file via tree-sitter and writes
    /// a `DiscoverResult` JSON document to stdout.
    #[tracing::instrument(skip(self))]
    fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
        for file_path in file_paths {
            discover_results.data.push(FoundFileTests {
                tests: discover(&file_path)?,
                path: file_path,
            })
        }
        send_stdout(&discover_results)?;
        Ok(())
    }

    /// Runs `node --test --test-reporter junit` on the given files in the
    /// workspace root, parses the JUnit XML from stdout into diagnostics and
    /// writes a `RunFileTestResult` JSON document to stdout.
    #[tracing::instrument(skip(self))]
    fn run_file_test(
        &self,
        args: testing_language_server::spec::RunFileTestArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let workspace_root = args.workspace;
        let output = std::process::Command::new("node")
            .current_dir(&workspace_root)
            .args(["--test", "--test-reporter", "junit"])
            .args(args.extra)
            .args(&file_paths)
            .output()
            .unwrap();
        // Keep the raw runner output on disk for debugging.
        write_result_log("node-test.xml", &output)?;
        let Output { stdout, stderr, .. } = output;
        // Only treat the run as an adapter error when node produced no XML at
        // all; failing tests still emit XML on stdout (with stderr noise).
        if stdout.is_empty() && !stderr.is_empty() {
            return Err(LSError::Adapter(String::from_utf8(stderr).unwrap()));
        }
        let stdout = String::from_utf8(stdout).unwrap();
        let result_from_xml = get_result_from_xml(&stdout, &file_paths)?;
        let result_item: Vec<FileDiagnostics> = result_from_xml
            .into_iter()
            .map(|result_from_xml| {
                let result_item: FileDiagnostics = result_from_xml.into();
                result_item
            })
            .collect();
        let result = RunFileTestResult {
            data: result_item,
            messages: vec![],
        };
        send_stdout(&result)?;
        Ok(())
    }

    /// Maps each file to its workspace root — the nearest ancestor directory
    /// containing a `package.json` — and writes the result to stdout.
    #[tracing::instrument(skip(self))]
    fn detect_workspaces(
        &self,
        args: testing_language_server::spec::DetectWorkspaceArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let detect_result: DetectWorkspaceResult =
            detect_workspaces_from_file_list(&file_paths, &["package.json".to_string()]);
        send_stdout(&detect_result)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use lsp_types::{Position, Range};

    use super::*;

    /// Parses a captured `node --test` JUnit report and checks that every
    /// failure location points at the demo test file.
    #[test]
    fn parse_xml() {
        let xml_path = std::env::current_dir()
            .unwrap()
            .join("../../demo/node-test/output.xml");
        let content = std::fs::read_to_string(&xml_path).unwrap();
        let target_file_path =
            "/home/test-user/projects/testing-language-server/demo/node-test/index.test.js";
        let result = get_result_from_xml(&content, &[target_file_path.to_string()]).unwrap();
        assert_eq!(result.len(), 9);
        for item in &result {
            assert_eq!(target_file_path, item.path.as_str());
        }
        let lines: Vec<u32> = result.iter().map(|item| item.line).collect();
        assert_eq!(lines, [13, 25, 32, 47, 87, 101, 145, 156, 172]);
        let cols: Vec<u32> = result.iter().map(|item| item.col).collect();
        assert_eq!(cols, [10, 10, 14, 10, 9, 9, 9, 11, 3]);
    }

    /// Discovers all tests in the demo file and compares them against a
    /// compact table of (name, start line, start column, end line, end column)
    /// expectations; the start range always extends to column 10000 and the
    /// end range always begins at column 0.
    #[test]
    fn test_discover() {
        let file_path = "../../demo/node-test/index.test.js";
        let test_items = discover(file_path).unwrap();
        assert_eq!(test_items.len(), 26);
        let table: [(&str, u32, u32, u32, u32); 26] = [
            ("synchronous passing test", 5, 0, 8, 2),
            ("synchronous failing test", 10, 0, 13, 2),
            ("asynchronous passing test", 15, 0, 19, 2),
            ("asynchronous failing test", 21, 0, 25, 2),
            ("failing test using Promises", 27, 0, 34, 2),
            ("callback passing test", 36, 0, 40, 2),
            ("callback failing test", 42, 0, 48, 2),
            ("top level test", 51, 0, 59, 2),
            ("skip option", 63, 0, 65, 2),
            ("skip option with message", 68, 0, 70, 2),
            ("skip() method", 72, 0, 75, 2),
            ("skip() method with message", 77, 0, 80, 2),
            ("todo option", 84, 0, 87, 2),
            ("todo option with message", 90, 0, 92, 2),
            ("todo() method", 94, 0, 96, 2),
            ("todo() method with message", 98, 0, 101, 2),
            ("A thing::should work", 105, 2, 107, 4),
            ("A thing::should be ok", 109, 2, 111, 4),
            ("A thing::a nested thing::should work", 114, 4, 116, 6),
            ("only: this test is run", 123, 0, 139, 2),
            ("only: this test is not run", 142, 0, 145, 2),
            ("A suite::this test is run A ", 149, 2, 151, 4),
            ("A suite::this test is not run B", 153, 2, 156, 4),
            ("this test is run C", 161, 2, 163, 4),
            ("this test is run D", 165, 2, 167, 4),
            ("import from external file. this must be fail", 170, 0, 172, 2),
        ];
        let expected: Vec<TestItem> = table
            .iter()
            .map(|&(name, start_line, start_col, end_line, end_col)| TestItem {
                id: name.to_string(),
                name: name.to_string(),
                path: file_path.to_string(),
                start_position: Range {
                    start: Position {
                        line: start_line,
                        character: start_col,
                    },
                    end: Position {
                        line: start_line,
                        character: 10000,
                    },
                },
                end_position: Range {
                    start: Position {
                        line: end_line,
                        character: 0,
                    },
                    end: Position {
                        line: end_line,
                        character: end_col,
                    },
                },
            })
            .collect();
        assert_eq!(test_items, expected);
    }
}

View file

@ -0,0 +1,318 @@
use std::fs::File;
use std::io::BufReader;
use std::process::Output;
use testing_language_server::error::LSError;
use testing_language_server::spec::{
DetectWorkspaceResult, DiscoverResult, FileDiagnostics, FoundFileTests, RunFileTestResult,
TestItem,
};
use xml::reader::{ParserConfig, XmlEvent};
use crate::model::Runner;
use super::util::{
detect_workspaces_from_file_list, discover_with_treesitter, send_stdout, ResultFromXml,
LOG_LOCATION,
};
/// Maps each file to its phpunit workspace root — the nearest ancestor
/// directory containing a `composer.json`.
fn detect_workspaces(file_paths: Vec<String>) -> DetectWorkspaceResult {
    detect_workspaces_from_file_list(&file_paths, &["composer.json".to_string()])
}
/// Splits the text of a phpunit JUnit `<failure>` element into a message and a
/// `path:line` location.
///
/// # Errors
/// Returns an error when `characters` does not contain the `"\n\n"`-separated
/// location section or the line number cannot be parsed. This must not panic:
/// the XML walker calls it on every text node and relies on `Err` to reject
/// non-failure text.
fn get_result_from_characters(characters: &str) -> Result<ResultFromXml, anyhow::Error> {
    // characters can be like
    // Tests\\CalculatorTest::testFail1\nFailed asserting that 8 matches expected 1.\n\n/home/kbwo/projects/github.com/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php:28
    let mut split = characters.split("\n\n");
    let message = split
        .next()
        .ok_or_else(|| anyhow::anyhow!("empty failure text"))?
        .trim_start_matches("Failed asserting that ")
        .trim_end_matches('.')
        .to_string();
    let location = split
        .next()
        .ok_or_else(|| anyhow::anyhow!("no location section in: {characters}"))?;
    // NOTE(review): splitting on ':' assumes Unix-style absolute paths with no
    // drive letter — confirm if Windows support is ever needed.
    let mut split_location = location.split(':');
    let path = split_location
        .next()
        .ok_or_else(|| anyhow::anyhow!("missing path in location: {location}"))?
        .to_string();
    let line = split_location
        .next()
        .ok_or_else(|| anyhow::anyhow!("missing line number in location: {location}"))?
        .parse()?;
    Ok(ResultFromXml {
        message,
        path,
        line,
        col: 1,
    })
}
/// Reads the phpunit JUnit log at `path` and collects one [`ResultFromXml`]
/// per `<failure>` text node.
///
/// # Errors
/// Returns an error when the log file cannot be opened (e.g. the phpunit
/// invocation failed before writing it). XML read errors are logged and stop
/// the loop early, returning whatever was collected so far.
fn get_result_from_xml(path: &str) -> Result<Vec<ResultFromXml>, anyhow::Error> {
    use xml::common::Position;
    // Propagate open failures instead of panicking.
    let file = File::open(path)?;
    let mut reader = ParserConfig::default()
        .ignore_root_level_whitespace(false)
        .create_reader(BufReader::new(file));
    let local_name = "failure";
    let mut in_failure = false;
    let mut result: Vec<ResultFromXml> = Vec::new();
    loop {
        match reader.next() {
            Ok(e) => match e {
                XmlEvent::StartElement { name, .. } => {
                    if name.local_name.starts_with(local_name) {
                        in_failure = true;
                    }
                }
                XmlEvent::EndElement { .. } => {
                    // NOTE(review): resets on ANY closing tag, not just
                    // `</failure>`; works because the failure text sits
                    // directly inside the element.
                    in_failure = false;
                }
                XmlEvent::Characters(data) => {
                    if let Ok(result_from_xml) = get_result_from_characters(&data) {
                        if in_failure {
                            result.push(result_from_xml);
                        }
                    }
                }
                XmlEvent::EndDocument => break,
                _ => {}
            },
            Err(e) => {
                tracing::error!("Error at {}: {e}", reader.position());
                break;
            }
        }
    }
    Ok(result)
}
/// Discovers phpunit test methods in a PHP file with tree-sitter and returns
/// them as [`TestItem`]s.
///
/// The query below is adapted from neotest-phpunit (see links); it captures
/// classes whose name contains "Test" as namespaces, and methods that carry a
/// `Test` attribute, start with "test", or are preceded by a `@test` comment.
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
    // from https://github.com/olimorris/neotest-phpunit/blob/bbd79d95e927ccd16f0e1d765060058d34838e2e/lua/neotest-phpunit/init.lua#L111
    // license: https://github.com/olimorris/neotest-phpunit/blob/bbd79d95e927ccd16f0e1d765060058d34838e2e/LICENSE
    let query = r#"
((class_declaration
name: (name) @namespace.name (#match? @namespace.name "Test")
)) @namespace.definition
((method_declaration
(attribute_list
(attribute_group
(attribute) @test_attribute (#match? @test_attribute "Test")
)
)
(
(visibility_modifier)
(name) @test.name
) @test.definition
))
((method_declaration
(name) @test.name (#match? @test.name "test")
)) @test.definition
(((comment) @test_comment (#match? @test_comment "\\@test") .
(method_declaration
(name) @test.name
) @test.definition
))
"#;
    discover_with_treesitter(file_path, &tree_sitter_php::language_php(), query)
}
/// Adapter [`Runner`] for PHPUnit.
#[derive(Eq, PartialEq, Debug)]
pub struct PhpunitRunner;
impl Runner for PhpunitRunner {
    /// Discovers test items in each requested file via tree-sitter and writes
    /// a `DiscoverResult` JSON document to stdout.
    #[tracing::instrument(skip(self))]
    fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
        for file_path in file_paths {
            discover_results.data.push(FoundFileTests {
                tests: discover(&file_path)?,
                path: file_path,
            })
        }
        send_stdout(&discover_results)?;
        Ok(())
    }

    /// Runs `phpunit --log-junit <log> --filter /<names>/` on the given files,
    /// parses the JUnit log into diagnostics and writes a `RunFileTestResult`
    /// JSON document to stdout.
    #[tracing::instrument(skip(self))]
    fn run_file_test(
        &self,
        args: testing_language_server::spec::RunFileTestArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let workspace_root = args.workspace;
        let log_path = LOG_LOCATION.join("phpunit.xml");
        // Re-discover the files to build the --filter pattern from the ids of
        // every test they contain; files that fail discovery are skipped.
        let tests = file_paths
            .iter()
            .map(|path| {
                discover(path).map(|test_items| {
                    test_items
                        .into_iter()
                        .map(|item| item.id)
                        .collect::<Vec<String>>()
                })
            })
            .filter_map(Result::ok)
            .flatten()
            .collect::<Vec<_>>();
        // NOTE(review): test names are joined into a regex without escaping,
        // so names containing regex metacharacters would corrupt the filter.
        let test_names = tests.join("|");
        let filter_pattern = format!("/{test_names}/");
        let output = std::process::Command::new("phpunit")
            .current_dir(&workspace_root)
            .args([
                "--log-junit",
                log_path.to_str().unwrap(),
                "--filter",
                &filter_pattern,
            ])
            .args(file_paths)
            .stdout(std::process::Stdio::null())
            .stderr(std::process::Stdio::null())
            .output()
            .unwrap();
        let Output { stdout, stderr, .. } = output;
        if stdout.is_empty() && !stderr.is_empty() {
            return Err(LSError::Adapter(String::from_utf8(stderr).unwrap()));
        }
        // Results are read from the JUnit log file, not from stdout.
        let result_from_xml = get_result_from_xml(log_path.to_str().unwrap())?;
        let result_item: Vec<FileDiagnostics> = result_from_xml
            .into_iter()
            .map(|result_from_xml| {
                let result_item: FileDiagnostics = result_from_xml.into();
                result_item
            })
            .collect();
        let result = RunFileTestResult {
            data: result_item,
            messages: vec![],
        };
        send_stdout(&result)?;
        Ok(())
    }

    /// Maps each file to its workspace root (nearest `composer.json`) and
    /// writes the result to stdout.
    #[tracing::instrument(skip(self))]
    fn detect_workspaces(
        &self,
        args: testing_language_server::spec::DetectWorkspaceArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let detect_result = detect_workspaces(file_paths);
        send_stdout(&detect_result)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use lsp_types::{Position, Range};

    use crate::runner::util::MAX_CHAR_LENGTH;

    use super::*;

    /// Parses a captured phpunit JUnit log and checks the single failure's
    /// message, path and line.
    #[test]
    fn parse_xml() {
        let xml_path = std::env::current_dir()
            .unwrap()
            .join("../../demo/phpunit/output.xml");
        let result = get_result_from_xml(xml_path.to_str().unwrap()).unwrap();
        assert_eq!(result.len(), 1);
        let first = &result[0];
        assert_eq!(
            first.message,
            "Tests\\CalculatorTest::testFail1\nFailed asserting that 8 matches expected 1"
        );
        assert_eq!(
            first.path,
            "/home/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php"
        );
        assert_eq!(first.line, 28);
    }

    /// Discovers the three test methods in the demo file and compares them
    /// against a compact table of (name, start line, end line, end column)
    /// expectations; every method starts at column 4 and the start range
    /// extends to MAX_CHAR_LENGTH.
    #[test]
    fn test_discover() {
        let file_path = "../../demo/phpunit/src/CalculatorTest.php";
        let test_items = discover(file_path).unwrap();
        assert_eq!(test_items.len(), 3);
        let expected: Vec<TestItem> = [
            ("CalculatorTest::testAdd", 9u32, 14u32, 5u32),
            ("CalculatorTest::testSubtract", 16, 21, 5),
            ("CalculatorTest::testFail1", 23, 28, 5),
        ]
        .iter()
        .map(|&(name, start_line, end_line, end_col)| TestItem {
            id: name.to_string(),
            name: name.to_string(),
            path: file_path.to_string(),
            start_position: Range {
                start: Position {
                    line: start_line,
                    character: 4,
                },
                end: Position {
                    line: start_line,
                    character: MAX_CHAR_LENGTH,
                },
            },
            end_position: Range {
                start: Position {
                    line: end_line,
                    character: 0,
                },
                end: Position {
                    line: end_line,
                    character: end_col,
                },
            },
        })
        .collect();
        assert_eq!(test_items, expected);
    }
}

View file

@ -0,0 +1,421 @@
use std::collections::{HashMap, HashSet};
use std::io;
use std::path::{Path, PathBuf};
use std::process::Output;
use std::str::FromStr;
use std::sync::LazyLock;
use lsp_types::{Diagnostic, DiagnosticSeverity, Position, Range};
use regex::Regex;
use serde::Serialize;
use testing_language_server::spec::{DetectWorkspaceResult, FileDiagnostics, TestItem};
use testing_language_server::{error::LSError, spec::RunFileTestResult};
use tree_sitter::{Language, Point, Query, QueryCursor};
pub struct DiscoverWithTSOption {}
/// Directory where adapters write raw runner output for debugging
/// (e.g. `node-test.xml`, `phpunit.xml`).
// NOTE(review): panics at first use if no home directory can be determined.
pub static LOG_LOCATION: LazyLock<PathBuf> = LazyLock::new(|| {
    let home_dir = dirs::home_dir().unwrap();
    home_dir.join(".config/testing_language_server/adapter/")
});

// If the character value is greater than the line length it defaults back to the line length.
pub const MAX_CHAR_LENGTH: u32 = 10000;
/// A single test failure extracted from a runner's JUnit-style XML report.
#[derive(Debug)]
pub struct ResultFromXml {
    /// Failure message used as the diagnostic text.
    pub message: String,
    /// Path of the file the failure points at.
    pub path: String,
    /// Line number as reported by the runner (converted to 0-based in
    /// `Into<FileDiagnostics>`).
    pub line: u32,
    /// Column number as reported by the runner (converted to 0-based in
    /// `Into<FileDiagnostics>`).
    pub col: u32,
}
#[allow(clippy::from_over_into)]
impl Into<FileDiagnostics> for ResultFromXml {
    /// Converts a parsed XML failure into an LSP ERROR diagnostic for its
    /// file, spanning from the failure column to the end of the line.
    ///
    /// `line`/`col` come from the runner as 1-based values while LSP positions
    /// are 0-based; `saturating_sub` guards against a malformed report
    /// containing 0, which would otherwise underflow and panic.
    fn into(self) -> FileDiagnostics {
        FileDiagnostics {
            path: self.path,
            diagnostics: vec![Diagnostic {
                message: self.message,
                range: Range {
                    start: Position {
                        line: self.line.saturating_sub(1),
                        character: self.col.saturating_sub(1),
                    },
                    end: Position {
                        line: self.line.saturating_sub(1),
                        // Clients clamp an over-long end column to line length.
                        character: MAX_CHAR_LENGTH,
                    },
                },
                severity: Some(DiagnosticSeverity::ERROR),
                ..Default::default()
            }],
        }
    }
}
/// Walks up from `file_path` and returns the first ancestor directory that
/// directly contains any of `file_names` (e.g. `package.json`), or `None`
/// when no ancestor does.
fn detect_workspace_from_file(file_path: PathBuf, file_names: &[String]) -> Option<String> {
    // ancestors() yields the path itself first; skip it so we start at the
    // containing directory, exactly like the recursive parent() walk.
    for dir in file_path.ancestors().skip(1) {
        if file_names.iter().any(|name| dir.join(name).exists()) {
            return Some(dir.to_string_lossy().into_owned());
        }
    }
    None
}
/// Groups `target_file_paths` by workspace root, where a root is the nearest
/// ancestor directory containing one of `file_names`.
///
/// Files are processed shortest-path-first so that roots discovered early act
/// as prefixes for later, deeper files. A file can be recorded under both an
/// already-known root it sits inside AND the root found by walking its own
/// ancestors (see the comment below).
pub fn detect_workspaces_from_file_list(
    target_file_paths: &[String],
    file_names: &[String],
) -> DetectWorkspaceResult {
    let mut result_map: HashMap<String, Vec<String>> = HashMap::new();
    let mut file_paths = target_file_paths.to_vec();
    // Shortest paths first: workspace roots tend to be found before the files
    // nested inside them.
    file_paths.sort_by_key(|b| b.len());
    for file_path in file_paths {
        // Substring match against known roots; relies on roots being absolute
        // path prefixes of their member files.
        let existing_workspace = result_map
            .iter()
            .find(|(workspace_root, _)| file_path.contains(workspace_root.as_str()));
        if let Some((workspace_root, _)) = existing_workspace {
            result_map
                .entry(workspace_root.to_string())
                .or_default()
                .push(file_path.clone());
        }
        // Push the file path to the found workspace even if existing_workspace becomes Some.
        // In some cases, a simple way to find a workspace,
        // such as the relationship between the project root and the adapter crate in this repository, may not work.
        let workspace =
            detect_workspace_from_file(PathBuf::from_str(&file_path).unwrap(), file_names);
        if let Some(workspace) = workspace {
            // Deduplicate: skip when this file is already recorded under the
            // detected root (e.g. by the prefix match above).
            if result_map
                .get(&workspace)
                .map(|v| !v.contains(&file_path))
                .unwrap_or(true)
            {
                result_map
                    .entry(workspace)
                    .or_default()
                    .push(file_path.clone());
            }
        }
    }
    DetectWorkspaceResult { data: result_map }
}
/// Serializes `value` to JSON on stdout — the adapter's reply channel back to
/// the language server — after logging it for debugging.
pub fn send_stdout<T>(value: &T) -> Result<(), LSError>
where
    T: ?Sized + Serialize + std::fmt::Debug,
{
    tracing::info!("adapter stdout: {:#?}", value);
    serde_json::to_writer(std::io::stdout(), &value)?;
    Ok(())
}
pub fn clean_ansi(input: &str) -> String {
let re = Regex::new(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[m|K]").unwrap();
re.replace_all(input, "").to_string()
}
/// Discovers Rust test functions in a file with tree-sitter and returns them
/// as [`TestItem`]s.
///
/// The query below is adapted from neotest-rust (see links); it captures
/// functions annotated with `#[test]`, `#[rstest]` or `#[case]` (possibly with
/// further attributes or line comments in between) and `mod` items as
/// namespaces.
pub fn discover_rust_tests(file_path: &str) -> Result<Vec<TestItem>, LSError> {
    // from https://github.com/rouge8/neotest-rust/blob/0418811e1e3499b2501593f2e131d02f5e6823d4/lua/neotest-rust/init.lua#L167
    // license: https://github.com/rouge8/neotest-rust/blob/0418811e1e3499b2501593f2e131d02f5e6823d4/LICENSE
    let query = r#"
(
(attribute_item
[
(attribute
(identifier) @macro_name
)
(attribute
[
(identifier) @macro_name
(scoped_identifier
name: (identifier) @macro_name
)
]
)
]
)
[
(attribute_item
(attribute
(identifier)
)
)
(line_comment)
]*
.
(function_item
name: (identifier) @test.name
) @test.definition
(#any-of? @macro_name "test" "rstest" "case")
)
(mod_item name: (identifier) @namespace.name)? @namespace.definition
"#;
    discover_with_treesitter(file_path, &tree_sitter_rust::language(), query)
}
/// Runs a tree-sitter `query` against the file at `file_path` and converts the
/// `namespace.*` / `test.*` captures into [`TestItem`]s.
///
/// Capture contract (shared by all runners' queries):
/// - `namespace.definition` / `namespace.name`: enclosing suite (e.g.
///   `describe`, PHP class, Rust `mod`); names nest as `outer::inner`.
/// - `test.definition` / `test.name`: a single test; its id is
///   `namespace::name` when a namespace is active, otherwise just the name.
///
/// For each item, `start_position` spans from the definition's start column to
/// `MAX_CHAR_LENGTH` on the start line, and `end_position` spans from column 0
/// to the definition's end column on the end line. Duplicate ids are emitted
/// only once.
pub fn discover_with_treesitter(
    file_path: &str,
    language: &Language,
    query: &str,
) -> Result<Vec<TestItem>, LSError> {
    let mut parser = tree_sitter::Parser::new();
    let mut test_items: Vec<TestItem> = vec![];
    parser
        .set_language(language)
        // NOTE(review): message is misleading — this function is used for
        // every supported grammar, not just Rust.
        .expect("Error loading Rust grammar");
    let source_code = std::fs::read_to_string(file_path)?;
    let tree = parser.parse(&source_code, None).unwrap();
    let query = Query::new(language, query).expect("Error creating query");
    let mut cursor = QueryCursor::new();
    cursor.set_byte_range(tree.root_node().byte_range());
    let source = source_code.as_bytes();
    let matches = cursor.matches(&query, tree.root_node(), source);
    // Current namespace path ("a::b") and the row ranges of open namespaces.
    let mut namespace_name = String::new();
    let mut namespace_position_stack: Vec<(Point, Point)> = vec![];
    let mut test_id_set = HashSet::new();
    for m in matches {
        let mut test_start_position = Point::default();
        let mut test_end_position = Point::default();
        // Captures within one match arrive in tree order; the arms below rely
        // on `test.definition` being seen before its `test.name`.
        for capture in m.captures {
            let capture_name = query.capture_names()[capture.index as usize];
            let value = capture.node.utf8_text(source)?;
            let start_position = capture.node.start_position();
            let end_position = capture.node.end_position();
            match capture_name {
                "namespace.definition" => {
                    namespace_position_stack.push((start_position, end_position));
                }
                "namespace.name" => {
                    let current_namespace = namespace_position_stack.first();
                    if let Some((ns_start, ns_end)) = current_namespace {
                        // In namespace definition
                        if start_position.row >= ns_start.row
                            && end_position.row <= ns_end.row
                            && !namespace_name.is_empty()
                        {
                            // Nested namespace: extend the qualified path.
                            namespace_name = format!("{}::{}", namespace_name, value);
                        } else {
                            namespace_name = value.to_string();
                        }
                    } else {
                        namespace_name = value.to_string();
                    }
                }
                "test.definition" => {
                    // A test outside the tracked namespace's row range means
                    // that namespace has been left: pop it and reset the path.
                    if let Some((ns_start, ns_end)) = namespace_position_stack.first() {
                        if start_position.row < ns_start.row || end_position.row > ns_end.row {
                            namespace_position_stack.remove(0);
                            namespace_name = String::new();
                        }
                    }
                    test_start_position = start_position;
                    test_end_position = end_position;
                }
                "test.name" => {
                    let test_id = if namespace_name.is_empty() {
                        value.to_string()
                    } else {
                        format!("{}::{}", namespace_name, value)
                    };
                    if test_id_set.contains(&test_id) {
                        // Skip the remaining captures of this duplicate match.
                        continue;
                    } else {
                        test_id_set.insert(test_id.clone());
                    }
                    let test_item = TestItem {
                        id: test_id.clone(),
                        name: test_id,
                        path: file_path.to_string(),
                        start_position: Range {
                            start: Position {
                                line: test_start_position.row as u32,
                                character: test_start_position.column as u32,
                            },
                            end: Position {
                                line: test_start_position.row as u32,
                                character: MAX_CHAR_LENGTH,
                            },
                        },
                        end_position: Range {
                            start: Position {
                                line: test_end_position.row as u32,
                                character: 0,
                            },
                            end: Position {
                                line: test_end_position.row as u32,
                                character: test_end_position.column as u32,
                            },
                        },
                    };
                    test_items.push(test_item);
                    test_start_position = Point::default();
                    test_end_position = Point::default();
                }
                _ => {}
            }
        }
    }
    Ok(test_items)
}
/// Parses `cargo test` output into LSP diagnostics grouped per file.
///
/// Scans `contents` for the standard panic header
/// (`thread '<test id>' panicked at <file>:<line>:<col>:`); the run of
/// non-empty lines following the header is collected as the panic message.
/// Each hit yields a diagnostic at the panicked position, plus — when the
/// panicking test can be matched against `test_items` — a second diagnostic
/// at the start of the test definition so the failing test itself is visible.
pub fn parse_cargo_diagnostics(
    contents: &str,
    workspace_root: PathBuf,
    file_paths: &[String],
    test_items: &[TestItem],
) -> RunFileTestResult {
    let contents = contents.replace("\r\n", "\n");
    // Materialized once so message collection below is O(1) per line instead
    // of re-walking a cloned `lines()` iterator with `nth`/`count` each time.
    let lines: Vec<&str> = contents.lines().collect();
    // Example:
    // thread 'server::tests::test_panic' panicked at src/server.rs:584:9:
    // Compiled once, outside the loop.
    let re = Regex::new(r"thread '([^']+)' panicked at ([^:]+):(\d+):(\d+):").unwrap();
    let mut result_map: HashMap<String, Vec<Diagnostic>> = HashMap::new();
    for (i, line) in lines.iter().enumerate() {
        let Some(m) = re.captures(line) else {
            continue;
        };
        // <module path>::<test id>, e.g. `server::tests::test_panic`
        let id_with_file = m.get(1).unwrap().as_str().to_string();
        // relative path, e.g. `src/server.rs`
        let relative_file_path = m.get(2).unwrap().as_str().to_string();
        let Some(file_path) = file_paths.iter().find(|path| {
            path.contains(workspace_root.join(&relative_file_path).to_str().unwrap())
        }) else {
            continue;
        };
        // Match the panic's module path against each discovered test item by
        // rebuilding `<module path>::<id>` from the item's file path.
        let matched_test_item = test_items.iter().find(|item| {
            let item_path = item
                .path
                .strip_prefix(workspace_root.to_str().unwrap())
                .unwrap_or(&item.path);
            let item_path = item_path.strip_suffix(".rs").unwrap_or(item_path);
            let item_path = item_path
                .replace('/', "::")
                .replace("::src::lib", "")
                .replace("::src::main", "")
                .replace("::src::", "");
            format!("{}::{}", item_path, item.id) == id_with_file
        });
        // Panic positions are 1-based; LSP positions are 0-based. Saturate so
        // a malformed `0` cannot underflow.
        let lnum = m.get(3).unwrap().as_str().parse::<u32>().unwrap().saturating_sub(1);
        let col = m.get(4).unwrap().as_str().parse::<u32>().unwrap().saturating_sub(1);
        // The panic message is the run of non-empty lines after the header.
        let mut message = String::new();
        let mut next_i = i + 1;
        while next_i < lines.len() && !lines[next_i].is_empty() {
            message.push_str(lines[next_i]);
            message.push('\n');
            next_i += 1;
        }
        let diagnostic = Diagnostic {
            range: Range {
                start: Position {
                    line: lnum,
                    character: col,
                },
                end: Position {
                    line: lnum,
                    character: MAX_CHAR_LENGTH,
                },
            },
            message: message.clone(),
            severity: Some(DiagnosticSeverity::ERROR),
            ..Diagnostic::default()
        };
        // If a test item matched, also add a diagnostic at the beginning of
        // the test item in order to show which test failed. Without this,
        // only panicked positions would be shown.
        if let Some(test_item) = matched_test_item {
            let message = format!(
                "`{}` failed at {relative_file_path}:{lnum}:{col}\nMessage:\n{message}",
                test_item.name
            );
            let lnum = test_item.start_position.start.line;
            let col = test_item.start_position.start.character;
            let diagnostic = Diagnostic {
                range: Range {
                    start: Position {
                        line: lnum,
                        character: col,
                    },
                    end: Position {
                        line: lnum,
                        character: MAX_CHAR_LENGTH,
                    },
                },
                message,
                severity: Some(DiagnosticSeverity::ERROR),
                ..Diagnostic::default()
            };
            result_map
                .entry(test_item.path.to_string())
                .or_default()
                .push(diagnostic);
        }
        result_map
            .entry(file_path.to_string())
            .or_default()
            .push(diagnostic);
    }
    let data = result_map
        .into_iter()
        .map(|(path, diagnostics)| FileDiagnostics { path, diagnostics })
        .collect();
    RunFileTestResult {
        data,
        messages: vec![],
    }
}
/// Normalizes `relative_path` against `base_dir`, resolving `.` and `..`
/// lexically (no filesystem access).
///
/// NOTE: duplicate implementation exists elsewhere; candidate for removal.
pub fn resolve_path(base_dir: &Path, relative_path: &str) -> PathBuf {
    use std::path::Component;

    let candidate = Path::new(relative_path);
    let absolute = if candidate.is_absolute() {
        candidate.to_path_buf()
    } else {
        base_dir.join(candidate)
    };
    let mut resolved: Vec<Component> = Vec::new();
    for part in absolute.components() {
        match part {
            // `..` drops the previously kept component, if any.
            Component::ParentDir => {
                resolved.pop();
            }
            // Keep real path segments and the root marker.
            Component::Normal(_) | Component::RootDir => resolved.push(part),
            // `.` and platform prefixes are dropped.
            _ => {}
        }
    }
    resolved.into_iter().collect()
}
/// Writes a command's stdout and stderr to `<LOG_LOCATION>/<file_name>`.
///
/// Output is decoded lossily (invalid UTF-8 becomes U+FFFD), so a test
/// runner emitting non-UTF-8 bytes can no longer panic this function.
pub fn write_result_log(file_name: &str, output: &Output) -> io::Result<()> {
    // `from_utf8_lossy` borrows the bytes, replacing the previous
    // clone-then-`String::from_utf8(..).unwrap()` which both allocated a
    // copy and panicked on invalid UTF-8.
    let stdout = String::from_utf8_lossy(&output.stdout);
    let stderr = String::from_utf8_lossy(&output.stderr);
    let content = format!("stdout:\n{}\nstderr:\n{}", stdout, stderr);
    let log_path = LOG_LOCATION.join(file_name);
    std::fs::write(&log_path, content)?;
    Ok(())
}

View file

@ -0,0 +1,266 @@
use std::{
collections::HashMap,
fs::{self},
};
use lsp_types::{Diagnostic, DiagnosticSeverity};
use serde_json::Value;
use testing_language_server::{
error::LSError,
spec::{DiscoverResult, FileDiagnostics, FoundFileTests, RunFileTestResult, TestItem},
};
use crate::model::Runner;
use super::util::{
clean_ansi, detect_workspaces_from_file_list, discover_with_treesitter, send_stdout,
LOG_LOCATION, MAX_CHAR_LENGTH,
};
/// Adapter implementing the `Runner` protocol for the vitest test framework.
#[derive(Eq, PartialEq, Hash, Debug)]
pub struct VitestRunner;
/// Discovers vitest tests in `file_path` by running a tree-sitter query over
/// the file's syntax tree.
///
/// The query captures `describe(...)` calls as namespaces and `it(...)` /
/// `test(...)` calls (including `.only` and `.each` variants) as tests;
/// `discover_with_treesitter` turns the captures into `TestItem`s.
///
/// NOTE(review): this uses the JavaScript grammar even though vitest files
/// are often TypeScript — the demo `.ts` fixture parses, but confirm
/// TS-specific syntax (type annotations, generics) does not break discovery.
fn discover(file_path: &str) -> Result<Vec<TestItem>, LSError> {
    // Query adapted from neotest-vitest:
    // from https://github.com/marilari88/neotest-vitest/blob/353364aa05b94b09409cbef21b79c97c5564e2ce/lua/neotest-vitest/init.lua#L101
    let query = r#"
; -- Namespaces --
; Matches: `describe('context')`
((call_expression
function: (identifier) @func_name (#eq? @func_name "describe")
arguments: (arguments (string (string_fragment) @namespace.name) (arrow_function))
)) @namespace.definition
; Matches: `describe.only('context')`
((call_expression
function: (member_expression
object: (identifier) @func_name (#any-of? @func_name "describe")
)
arguments: (arguments (string (string_fragment) @namespace.name) (arrow_function))
)) @namespace.definition
; Matches: `describe.each(['data'])('context')`
((call_expression
function: (call_expression
function: (member_expression
object: (identifier) @func_name (#any-of? @func_name "describe")
)
)
arguments: (arguments (string (string_fragment) @namespace.name) (arrow_function))
)) @namespace.definition
; -- Tests --
; Matches: `test('test') / it('test')`
((call_expression
function: (identifier) @func_name (#any-of? @func_name "it" "test")
arguments: (arguments (string (string_fragment) @test.name) (arrow_function))
)) @test.definition
; Matches: `test.only('test') / it.only('test')`
((call_expression
function: (member_expression
object: (identifier) @func_name (#any-of? @func_name "test" "it")
)
arguments: (arguments (string (string_fragment) @test.name) (arrow_function))
)) @test.definition
; Matches: `test.each(['data'])('test') / it.each(['data'])('test')`
((call_expression
function: (call_expression
function: (member_expression
object: (identifier) @func_name (#any-of? @func_name "it" "test")
)
)
arguments: (arguments (string (string_fragment) @test.name) (arrow_function))
)) @test.definition
"#;
    discover_with_treesitter(file_path, &tree_sitter_javascript::language(), query)
}
/// Converts vitest's JSON reporter output into per-file diagnostics.
///
/// Only assertion results with status `"failed"` produce diagnostics, and
/// only for files present in `file_paths`. Malformed or incomplete reporter
/// payloads (missing `testResults`, `location`, etc.) are skipped instead of
/// panicking the server, since the report comes from an external process.
fn parse_diagnostics(
    test_result: &str,
    file_paths: Vec<String>,
) -> Result<RunFileTestResult, LSError> {
    let mut result_map: HashMap<String, Vec<Diagnostic>> = HashMap::new();
    let json: Value = serde_json::from_str(test_result)?;
    // A payload without `testResults` (e.g. vitest crashed before reporting)
    // yields an empty result rather than a panic.
    let Some(test_results) = json["testResults"].as_array() else {
        return Ok(RunFileTestResult {
            data: vec![],
            messages: vec![],
        });
    };
    for test_result in test_results {
        let Some(file_path) = test_result["name"].as_str() else {
            continue;
        };
        if !file_paths.iter().any(|path| path.contains(file_path)) {
            continue;
        }
        let Some(assertion_results) = test_result["assertionResults"].as_array() else {
            continue;
        };
        for assertion_result in assertion_results {
            if assertion_result["status"].as_str() != Some("failed") {
                continue;
            }
            // Reporter lines are 1-based; saturate so a malformed 0 cannot
            // underflow.
            let Some(line) = assertion_result["location"]["line"].as_u64() else {
                continue;
            };
            let line = line.saturating_sub(1) as u32;
            let Some(failure_messages) = assertion_result["failureMessages"].as_array() else {
                continue;
            };
            for message in failure_messages {
                let Some(raw) = message.as_str() else {
                    continue;
                };
                let message = clean_ansi(raw);
                let diagnostic = Diagnostic {
                    range: lsp_types::Range {
                        start: lsp_types::Position {
                            line,
                            // Line and column number is slightly incorrect.
                            // ref:
                            // Bug in json reporter line number? · vitest-dev/vitest · Discussion #5350
                            // https://github.com/vitest-dev/vitest/discussions/5350
                            // Currently, the row numbers are from the parse result;
                            // the column numbers 0 and MAX_CHAR_LENGTH are hard-coded.
                            character: 0,
                        },
                        end: lsp_types::Position {
                            line,
                            character: MAX_CHAR_LENGTH,
                        },
                    },
                    message,
                    severity: Some(DiagnosticSeverity::ERROR),
                    ..Diagnostic::default()
                };
                result_map
                    .entry(file_path.to_string())
                    .or_default()
                    .push(diagnostic);
            }
        }
    }
    Ok(RunFileTestResult {
        data: result_map
            .into_iter()
            .map(|(path, diagnostics)| FileDiagnostics { path, diagnostics })
            .collect(),
        messages: vec![],
    })
}
impl Runner for VitestRunner {
    /// Discovers vitest tests in each requested file via tree-sitter and
    /// writes the discovery result to stdout.
    #[tracing::instrument(skip(self))]
    fn discover(&self, args: testing_language_server::spec::DiscoverArgs) -> Result<(), LSError> {
        let mut discover_results: DiscoverResult = DiscoverResult { data: vec![] };
        for file_path in args.file_paths {
            let tests = discover(&file_path)?;
            discover_results.data.push(FoundFileTests {
                tests,
                path: file_path,
            });
        }
        send_stdout(&discover_results)?;
        Ok(())
    }

    /// Runs vitest in the workspace with the JSON reporter, reads the report
    /// from `<LOG_LOCATION>/vitest.json`, and writes diagnostics to stdout.
    #[tracing::instrument(skip(self))]
    fn run_file_test(
        &self,
        args: testing_language_server::spec::RunFileTestArgs,
    ) -> Result<(), LSError> {
        let file_paths = args.file_paths;
        let workspace_root = args.workspace;
        let log_path = LOG_LOCATION.join("vitest.json");
        let log_path = log_path.to_str().unwrap();
        // `--outputFile` must carry its value in the same argv entry. The
        // previous `["--outputFile=", log_path]` left the flag value empty
        // and made the path a positional test filter, so no report was
        // written where we read it back below.
        std::process::Command::new("vitest")
            .current_dir(&workspace_root)
            .arg("--watch=false")
            .arg("--reporter=json")
            .arg(format!("--outputFile={}", log_path))
            // Propagate a spawn failure (e.g. vitest not installed) as an
            // error instead of panicking the server.
            .output()?;
        let test_result = fs::read_to_string(log_path)?;
        let diagnostics: RunFileTestResult = parse_diagnostics(&test_result, file_paths)?;
        send_stdout(&diagnostics)?;
        Ok(())
    }

    /// Reports workspace roots detected from common vitest/vite config files
    /// (falling back to `package.json`).
    #[tracing::instrument(skip(self))]
    fn detect_workspaces(
        &self,
        args: testing_language_server::spec::DetectWorkspaceArgs,
    ) -> Result<(), LSError> {
        send_stdout(&detect_workspaces_from_file_list(
            &args.file_paths,
            &[
                "package.json".to_string(),
                "vitest.config.ts".to_string(),
                "vitest.config.js".to_string(),
                "vite.config.ts".to_string(),
                "vite.config.js".to_string(),
                "vitest.config.mts".to_string(),
                "vitest.config.mjs".to_string(),
                "vite.config.mts".to_string(),
                "vite.config.mjs".to_string(),
            ],
        ))?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use lsp_types::{Position, Range};

    use super::*;

    /// Discovery over the demo fixture must find exactly the two `it(...)`
    /// tests, with ids prefixed by the enclosing `describe` namespace and
    /// positions matching the fixture file's layout.
    /// NOTE: depends on the on-disk fixture `demo/vitest/basic.test.ts`.
    #[test]
    fn test_discover() {
        let file_path = "../../demo/vitest/basic.test.ts";
        let test_items = discover(file_path).unwrap();
        assert_eq!(test_items.len(), 2);
        assert_eq!(
            test_items,
            [
                TestItem {
                    id: "describe text::pass".to_string(),
                    name: "describe text::pass".to_string(),
                    path: file_path.to_string(),
                    start_position: Range {
                        start: Position {
                            line: 4,
                            character: 2
                        },
                        end: Position {
                            line: 4,
                            character: 10000
                        }
                    },
                    end_position: Range {
                        start: Position {
                            line: 6,
                            character: 0
                        },
                        end: Position {
                            line: 6,
                            character: 4
                        }
                    }
                },
                TestItem {
                    id: "describe text::fail".to_string(),
                    name: "describe text::fail".to_string(),
                    path: file_path.to_string(),
                    start_position: Range {
                        start: Position {
                            line: 8,
                            character: 2
                        },
                        end: Position {
                            line: 8,
                            character: 10000
                        }
                    },
                    end_position: Range {
                        start: Position {
                            line: 10,
                            character: 0
                        },
                        end: Position {
                            line: 10,
                            character: 4
                        }
                    }
                }
            ]
        )
    }
}

View file

@ -0,0 +1,161 @@
{"Time":"2024-05-25T17:06:16.98464582+09:00","Action":"start","Package":"neotest_go"}
{"Time":"2024-05-25T17:06:16.986822201+09:00","Action":"run","Package":"neotest_go","Test":"TestSubtract"}
{"Time":"2024-05-25T17:06:16.986838849+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract","Output":"=== RUN TestSubtract\n"}
{"Time":"2024-05-25T17:06:16.986859373+09:00","Action":"run","Package":"neotest_go","Test":"TestSubtract/test_one"}
{"Time":"2024-05-25T17:06:16.98686856+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_one","Output":" \tError Trace:\tcases_test.go:31\n"}
{"Time":"2024-05-25T17:06:16.986871386+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_one","Output":" \tError: \tNot equal: \n"}
{"Time":"2024-05-25T17:06:16.986874139+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_one","Output":" \t \texpected: 3\n"}
{"Time":"2024-05-25T17:06:16.986876748+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_one","Output":" \t \tactual : -1\n"}
{"Time":"2024-05-25T17:06:16.986879547+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_one","Output":" \tTest: \tTestSubtract/test_one\n"}
{"Time":"2024-05-25T17:06:16.986883029+09:00","Action":"run","Package":"neotest_go","Test":"TestSubtract/test_two"}
{"Time":"2024-05-25T17:06:16.986885264+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":"=== RUN TestSubtract/test_two\n"}
{"Time":"2024-05-25T17:06:16.986888429+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" cases_test.go:31: \n"}
{"Time":"2024-05-25T17:06:16.986891613+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" \tError Trace:\tcases_test.go:31\n"}
{"Time":"2024-05-25T17:06:16.986894222+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" \tError: \tNot equal: \n"}
{"Time":"2024-05-25T17:06:16.986896835+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" \t \texpected: 7\n"}
{"Time":"2024-05-25T17:06:16.986899333+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" \t \tactual : -1\n"}
{"Time":"2024-05-25T17:06:16.986901904+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" \tTest: \tTestSubtract/test_two\n"}
{"Time":"2024-05-25T17:06:16.986906401+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract","Output":"--- FAIL: TestSubtract (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986910144+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_one","Output":" --- FAIL: TestSubtract/test_one (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986913275+09:00","Action":"fail","Package":"neotest_go","Test":"TestSubtract/test_one","Elapsed":0}
{"Time":"2024-05-25T17:06:16.986916945+09:00","Action":"output","Package":"neotest_go","Test":"TestSubtract/test_two","Output":" --- FAIL: TestSubtract/test_two (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986919709+09:00","Action":"fail","Package":"neotest_go","Test":"TestSubtract/test_two","Elapsed":0}
{"Time":"2024-05-25T17:06:16.986922033+09:00","Action":"fail","Package":"neotest_go","Test":"TestSubtract","Elapsed":0}
{"Time":"2024-05-25T17:06:16.986924322+09:00","Action":"run","Package":"neotest_go","Test":"TestAdd"}
{"Time":"2024-05-25T17:06:16.986926439+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd","Output":"=== RUN TestAdd\n"}
{"Time":"2024-05-25T17:06:16.986929637+09:00","Action":"run","Package":"neotest_go","Test":"TestAdd/test_one"}
{"Time":"2024-05-25T17:06:16.986931891+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_one","Output":"=== RUN TestAdd/test_one\n"}
{"Time":"2024-05-25T17:06:16.98693449+09:00","Action":"run","Package":"neotest_go","Test":"TestAdd/test_two"}
{"Time":"2024-05-25T17:06:16.986936644+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":"=== RUN TestAdd/test_two\n"}
{"Time":"2024-05-25T17:06:16.986939093+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" cases_test.go:42: \n"}
{"Time":"2024-05-25T17:06:16.986941721+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" \tError Trace:\tcases_test.go:42\n"}
{"Time":"2024-05-25T17:06:16.986944261+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" \tError: \tNot equal: \n"}
{"Time":"2024-05-25T17:06:16.986946773+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" \t \texpected: 5\n"}
{"Time":"2024-05-25T17:06:16.986949247+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" \t \tactual : 3\n"}
{"Time":"2024-05-25T17:06:16.986951706+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" \tTest: \tTestAdd/test_two\n"}
{"Time":"2024-05-25T17:06:16.986954288+09:00","Action":"run","Package":"neotest_go","Test":"TestAdd/string"}
{"Time":"2024-05-25T17:06:16.986956496+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/string","Output":"=== RUN TestAdd/string\n"}
{"Time":"2024-05-25T17:06:16.986959568+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd","Output":"--- FAIL: TestAdd (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986964387+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_one","Output":" --- PASS: TestAdd/test_one (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986967557+09:00","Action":"pass","Package":"neotest_go","Test":"TestAdd/test_one","Elapsed":0}
{"Time":"2024-05-25T17:06:16.986970137+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/test_two","Output":" --- FAIL: TestAdd/test_two (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986973398+09:00","Action":"fail","Package":"neotest_go","Test":"TestAdd/test_two","Elapsed":0}
{"Time":"2024-05-25T17:06:16.986976554+09:00","Action":"output","Package":"neotest_go","Test":"TestAdd/string","Output":" --- PASS: TestAdd/string (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986979769+09:00","Action":"pass","Package":"neotest_go","Test":"TestAdd/string","Elapsed":0}
{"Time":"2024-05-25T17:06:16.98698262+09:00","Action":"fail","Package":"neotest_go","Test":"TestAdd","Elapsed":0}
{"Time":"2024-05-25T17:06:16.98698541+09:00","Action":"run","Package":"neotest_go","Test":"TestAddOne"}
{"Time":"2024-05-25T17:06:16.986987976+09:00","Action":"output","Package":"neotest_go","Test":"TestAddOne","Output":"=== RUN TestAddOne\n"}
{"Time":"2024-05-25T17:06:16.986990912+09:00","Action":"output","Package":"neotest_go","Test":"TestAddOne","Output":"--- PASS: TestAddOne (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.986994224+09:00","Action":"pass","Package":"neotest_go","Test":"TestAddOne","Elapsed":0}
{"Time":"2024-05-25T17:06:16.986996509+09:00","Action":"run","Package":"neotest_go","Test":"TestAddTwo"}
{"Time":"2024-05-25T17:06:16.986999108+09:00","Action":"output","Package":"neotest_go","Test":"TestAddTwo","Output":"=== RUN TestAddTwo\n"}
{"Time":"2024-05-25T17:06:16.98700243+09:00","Action":"output","Package":"neotest_go","Test":"TestAddTwo","Output":"--- PASS: TestAddTwo (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987005287+09:00","Action":"pass","Package":"neotest_go","Test":"TestAddTwo","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987008349+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest"}
{"Time":"2024-05-25T17:06:16.987011032+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest","Output":"=== RUN TestSomeTest\n"}
{"Time":"2024-05-25T17:06:16.987014678+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest/AccessDenied1"}
{"Time":"2024-05-25T17:06:16.987017664+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied1","Output":"=== RUN TestSomeTest/AccessDenied1\n"}
{"Time":"2024-05-25T17:06:16.98702332+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied1","Output":"AccessDenied1 GET /api/nothing lalala 403\n"}
{"Time":"2024-05-25T17:06:16.987030097+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest/AccessDenied2"}
{"Time":"2024-05-25T17:06:16.9870326+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied2","Output":"=== RUN TestSomeTest/AccessDenied2\n"}
{"Time":"2024-05-25T17:06:16.987035243+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied2","Output":"AccessDenied2 GET /api/nothing lalala 403\n"}
{"Time":"2024-05-25T17:06:16.98703803+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest/AccessDenied3"}
{"Time":"2024-05-25T17:06:16.987040299+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied3","Output":"=== RUN TestSomeTest/AccessDenied3\n"}
{"Time":"2024-05-25T17:06:16.987042979+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied3","Output":"AccessDenied3 GET /api/nothing lalala 403\n"}
{"Time":"2024-05-25T17:06:16.987045694+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest/AccessDenied4"}
{"Time":"2024-05-25T17:06:16.987048493+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied4","Output":"=== RUN TestSomeTest/AccessDenied4\n"}
{"Time":"2024-05-25T17:06:16.987051059+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied4","Output":"AccessDenied4 GET /api/nothing lalala 403\n"}
{"Time":"2024-05-25T17:06:16.987053703+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest/AccessDenied5"}
{"Time":"2024-05-25T17:06:16.987055897+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied5","Output":"=== RUN TestSomeTest/AccessDenied5\n"}
{"Time":"2024-05-25T17:06:16.987058733+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied5","Output":"AccessDenied5 GET /api/nothing lalala 403\n"}
{"Time":"2024-05-25T17:06:16.987062114+09:00","Action":"run","Package":"neotest_go","Test":"TestSomeTest/AccessDenied6"}
{"Time":"2024-05-25T17:06:16.987064491+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied6","Output":"=== RUN TestSomeTest/AccessDenied6\n"}
{"Time":"2024-05-25T17:06:16.987067709+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied6","Output":"AccessDenied6 GET /api/nothing lalala 403\n"}
{"Time":"2024-05-25T17:06:16.987076359+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest","Output":"--- PASS: TestSomeTest (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987079722+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied1","Output":" --- PASS: TestSomeTest/AccessDenied1 (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987082855+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest/AccessDenied1","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987085329+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied2","Output":" --- PASS: TestSomeTest/AccessDenied2 (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987088221+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest/AccessDenied2","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987090756+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied3","Output":" --- PASS: TestSomeTest/AccessDenied3 (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987094115+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest/AccessDenied3","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987096613+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied4","Output":" --- PASS: TestSomeTest/AccessDenied4 (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987099588+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest/AccessDenied4","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987104022+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied5","Output":" --- PASS: TestSomeTest/AccessDenied5 (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987107814+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest/AccessDenied5","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987110865+09:00","Action":"output","Package":"neotest_go","Test":"TestSomeTest/AccessDenied6","Output":" --- PASS: TestSomeTest/AccessDenied6 (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987113695+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest/AccessDenied6","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987116401+09:00","Action":"pass","Package":"neotest_go","Test":"TestSomeTest","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987119142+09:00","Action":"run","Package":"neotest_go","Test":"TestSplit"}
{"Time":"2024-05-25T17:06:16.987121813+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit","Output":"=== RUN TestSplit\n"}
{"Time":"2024-05-25T17:06:16.987124271+09:00","Action":"run","Package":"neotest_go","Test":"TestSplit/simple"}
{"Time":"2024-05-25T17:06:16.987126899+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/simple","Output":"=== RUN TestSplit/simple\n"}
{"Time":"2024-05-25T17:06:16.987129965+09:00","Action":"run","Package":"neotest_go","Test":"TestSplit/wrong_sep"}
{"Time":"2024-05-25T17:06:16.987132221+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/wrong_sep","Output":"=== RUN TestSplit/wrong_sep\n"}
{"Time":"2024-05-25T17:06:16.98713529+09:00","Action":"run","Package":"neotest_go","Test":"TestSplit/no_sep"}
{"Time":"2024-05-25T17:06:16.987138035+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/no_sep","Output":"=== RUN TestSplit/no_sep\n"}
{"Time":"2024-05-25T17:06:16.987140671+09:00","Action":"run","Package":"neotest_go","Test":"TestSplit/trailing_sep"}
{"Time":"2024-05-25T17:06:16.987143812+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/trailing_sep","Output":"=== RUN TestSplit/trailing_sep\n"}
{"Time":"2024-05-25T17:06:16.987148473+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/trailing_sep","Output":" map_table_test.go:25: trailing sep: expected: [a b c], got: [a b c ]\n"}
{"Time":"2024-05-25T17:06:16.987152641+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit","Output":"--- FAIL: TestSplit (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987156528+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/simple","Output":" --- PASS: TestSplit/simple (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.98716009+09:00","Action":"pass","Package":"neotest_go","Test":"TestSplit/simple","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987163018+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/wrong_sep","Output":" --- PASS: TestSplit/wrong_sep (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987166426+09:00","Action":"pass","Package":"neotest_go","Test":"TestSplit/wrong_sep","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987169756+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/no_sep","Output":" --- PASS: TestSplit/no_sep (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987173446+09:00","Action":"pass","Package":"neotest_go","Test":"TestSplit/no_sep","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987176509+09:00","Action":"output","Package":"neotest_go","Test":"TestSplit/trailing_sep","Output":" --- FAIL: TestSplit/trailing_sep (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987179658+09:00","Action":"fail","Package":"neotest_go","Test":"TestSplit/trailing_sep","Elapsed":0}
{"Time":"2024-05-25T17:06:16.9871823+09:00","Action":"fail","Package":"neotest_go","Test":"TestSplit","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987185013+09:00","Action":"run","Package":"neotest_go","Test":"TestExampleTestSuite"}
{"Time":"2024-05-25T17:06:16.987187634+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite","Output":"=== RUN TestExampleTestSuite\n"}
{"Time":"2024-05-25T17:06:16.987358969+09:00","Action":"run","Package":"neotest_go","Test":"TestExampleTestSuite/TestExample"}
{"Time":"2024-05-25T17:06:16.987372775+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExample","Output":"=== RUN TestExampleTestSuite/TestExample\n"}
{"Time":"2024-05-25T17:06:16.987378537+09:00","Action":"run","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure"}
{"Time":"2024-05-25T17:06:16.987383079+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":"=== RUN TestExampleTestSuite/TestExampleFailure\n"}
{"Time":"2024-05-25T17:06:16.987611537+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" suite_test.go:32: \n"}
{"Time":"2024-05-25T17:06:16.987626951+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" \tError Trace:\tsuite_test.go:32\n"}
{"Time":"2024-05-25T17:06:16.987631582+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" \tError: \tNot equal: \n"}
{"Time":"2024-05-25T17:06:16.987635334+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" \t \texpected: 5\n"}
{"Time":"2024-05-25T17:06:16.9876384+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" \t \tactual : 3\n"}
{"Time":"2024-05-25T17:06:16.987641353+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" \tTest: \tTestExampleTestSuite/TestExampleFailure\n"}
{"Time":"2024-05-25T17:06:16.987649622+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite","Output":"--- FAIL: TestExampleTestSuite (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987655287+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExample","Output":" --- PASS: TestExampleTestSuite/TestExample (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987659094+09:00","Action":"pass","Package":"neotest_go","Test":"TestExampleTestSuite/TestExample","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987663126+09:00","Action":"output","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Output":" --- FAIL: TestExampleTestSuite/TestExampleFailure (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987666501+09:00","Action":"fail","Package":"neotest_go","Test":"TestExampleTestSuite/TestExampleFailure","Elapsed":0}
{"Time":"2024-05-25T17:06:16.9876691+09:00","Action":"fail","Package":"neotest_go","Test":"TestExampleTestSuite","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987671645+09:00","Action":"run","Package":"neotest_go","Test":"TestOdd"}
{"Time":"2024-05-25T17:06:16.987674132+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd","Output":"=== RUN TestOdd\n"}
{"Time":"2024-05-25T17:06:16.987676921+09:00","Action":"run","Package":"neotest_go","Test":"TestOdd/odd"}
{"Time":"2024-05-25T17:06:16.987680637+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd","Output":"=== RUN TestOdd/odd\n"}
{"Time":"2024-05-25T17:06:16.987683855+09:00","Action":"run","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd"}
{"Time":"2024-05-25T17:06:16.98769839+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd","Output":"=== RUN TestOdd/odd/5_is_odd\n"}
{"Time":"2024-05-25T17:06:16.987711715+09:00","Action":"run","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd/9_is_odd"}
{"Time":"2024-05-25T17:06:16.987713743+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd/9_is_odd","Output":"=== RUN TestOdd/odd/5_is_odd/9_is_odd\n"}
{"Time":"2024-05-25T17:06:16.987715909+09:00","Action":"run","Package":"neotest_go","Test":"TestOdd/odd/7_is_odd"}
{"Time":"2024-05-25T17:06:16.987717512+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd/7_is_odd","Output":"=== RUN TestOdd/odd/7_is_odd\n"}
{"Time":"2024-05-25T17:06:16.987720928+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd","Output":"--- PASS: TestOdd (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987723417+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd","Output":" --- PASS: TestOdd/odd (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987726445+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd","Output":" --- PASS: TestOdd/odd/5_is_odd (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987728552+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd/9_is_odd","Output":" --- PASS: TestOdd/odd/5_is_odd/9_is_odd (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.98773067+09:00","Action":"pass","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd/9_is_odd","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987733354+09:00","Action":"pass","Package":"neotest_go","Test":"TestOdd/odd/5_is_odd","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987734868+09:00","Action":"output","Package":"neotest_go","Test":"TestOdd/odd/7_is_odd","Output":" --- PASS: TestOdd/odd/7_is_odd (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987736884+09:00","Action":"pass","Package":"neotest_go","Test":"TestOdd/odd/7_is_odd","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987738363+09:00","Action":"pass","Package":"neotest_go","Test":"TestOdd/odd","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987739718+09:00","Action":"pass","Package":"neotest_go","Test":"TestOdd","Elapsed":0}
{"Time":"2024-05-25T17:06:16.987741167+09:00","Action":"run","Package":"neotest_go","Test":"Example_hello_ok"}
{"Time":"2024-05-25T17:06:16.987742665+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ok","Output":"=== RUN Example_hello_ok\n"}
{"Time":"2024-05-25T17:06:16.987748658+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ok","Output":"--- PASS: Example_hello_ok (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987750589+09:00","Action":"pass","Package":"neotest_go","Test":"Example_hello_ok","Elapsed":0}
{"Time":"2024-05-25T17:06:16.98775208+09:00","Action":"run","Package":"neotest_go","Test":"Example_hello_ng"}
{"Time":"2024-05-25T17:06:16.987753501+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ng","Output":"=== RUN Example_hello_ng\n"}
{"Time":"2024-05-25T17:06:16.987755588+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ng","Output":"--- FAIL: Example_hello_ng (0.00s)\n"}
{"Time":"2024-05-25T17:06:16.987757621+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ng","Output":"got:\n"}
{"Time":"2024-05-25T17:06:16.987759383+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ng","Output":"hello world\n"}
{"Time":"2024-05-25T17:06:16.987761201+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ng","Output":"want:\n"}
{"Time":"2024-05-25T17:06:16.987762945+09:00","Action":"output","Package":"neotest_go","Test":"Example_hello_ng","Output":"NG pattern\n"}
{"Time":"2024-05-25T17:06:16.987764982+09:00","Action":"fail","Package":"neotest_go","Test":"Example_hello_ng","Elapsed":0}
{"Time":"2024-05-25T17:06:16.988651539+09:00","Action":"output","Package":"neotest_go","Output":"FAIL\n"}
{"Time":"2024-05-25T17:06:16.988706481+09:00","Action":"output","Package":"neotest_go","Output":"FAIL\tneotest_go\t0.004s\n"}
{"Time":"2024-05-25T17:06:16.988710395+09:00","Action":"fail","Package":"neotest_go","Elapsed":0.004}

3
demo/.helix/config.toml Normal file
View file

@ -0,0 +1,3 @@
[editor.soft-wrap]
enable = true
max-wrap = 25 # increase value to reduce forced mid-word wrapping

View file

@ -0,0 +1,23 @@
[language-server.testing-ls]
command = "testing-language-server"
args = []
[[language]]
name = "rust"
language-servers = [ { name = "testing-ls", only-features = [ "diagnostics" ] }, "rust-analyzer" ]
[[language]]
name = "typescript"
language-servers = [ { name = "testing-ls", only-features = [ "diagnostics" ] }, "typescript-language-server" ]
[[language]]
name = "php"
language-servers = [ { name = "testing-ls", only-features = [ "diagnostics" ] }, "phpactor" ]
[[language]]
name = "go"
language-servers = [ { name = "testing-ls", only-features = [ "diagnostics" ] }, "gopls" ]
[[language]]
name = "javascript"
language-servers = [ { name = "testing-ls", only-features = [ "diagnostics" ] }, "typescript-language-server" ]

49
demo/.testingls.toml Normal file
View file

@ -0,0 +1,49 @@
enableWorkspaceDiagnostics = true
[adapterCommand.cargo-test]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=cargo-test"]
include = ["/**/src/**/*.rs"]
exclude = ["/**/target/**"]
[adapterCommand.cargo-nextest]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=cargo-nextest"]
include = ["/**/src/**/*.rs"]
exclude = ["/**/target/**"]
[adapterCommand.jest]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=jest"]
include = ["/jest/*.js"]
exclude = ["/jest/**/node_modules/**/*"]
[adapterCommand.vitest]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=vitest"]
include = ["/vitest/*.test.ts", "/vitest/config/**/*.test.ts"]
exclude = ["/vitest/**/node_modules/**/*"]
[adapterCommand.deno]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=deno"]
include = ["/deno/*.ts"]
exclude = []
[adapterCommand.go]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=go-test"]
include = ["/**/*.go"]
exclude = []
[adapterCommand.node-test]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=node-test"]
include = ["/node-test/*.test.js"]
exclude = []
[adapterCommand.phpunit]
path = "testing-ls-adapter"
extra_arg = ["--test-kind=phpunit"]
include = ["/**/*Test.php"]
exclude = ["/phpunit/vendor/**/*.php"]

View file

@ -0,0 +1,18 @@
{
"languageserver": {
"testing": {
"command": "testing-language-server",
"trace.server": "verbose",
"filetypes": [
"rust",
"javascript",
"go",
"typescript",
"php"
],
"initializationOptions": {}
}
},
"deno.enable": false,
"tsserver.enable": false
}

13
demo/.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,13 @@
{
"testing.enable": true,
"filetypes": [
"rust",
"javascript",
"go",
"typescript",
"php"
],
"testing.enableWorkspaceDiagnostics": true,
"testing.server.path": "testing-language-server",
"testing.trace.server": "verbose"
}

43
demo/README.md Normal file
View file

@ -0,0 +1,43 @@
## Using `nvim-lspconfig`
The specification is not stable, so you need to set it yourself. Once the spec is stable, I will send a PR to `nvim-lspconfig`.
```
local lspconfig = require('lspconfig')
local configs = require('lspconfig.configs')
local util = require "lspconfig/util"
configs.testing_ls = {
default_config = {
cmd = { "testing-language-server" },
filetypes = { "rust" },
root_dir = util.root_pattern(".git", "Cargo.toml"),
init_options = {
enable = true,
fileTypes = {"rust"},
adapterCommand = {
rust = {
{
path = "testing-ls-adapter",
extra_arg = { "--test-kind=cargo-test", "--workspace" },
include = { "/demo/**/src/**/*.rs"},
exclude = { "/**/target/**"},
}
}
},
enableWorkspaceDiagnostics = true,
trace = {
server = "verbose"
}
}
},
docs = {
description = [[
https://github.com/kbwo/testing-language-server
Language Server for real-time testing.
]],
},
}
lspconfig.testing_ls.setup{}
```

5
demo/deno/deno.json Normal file
View file

@ -0,0 +1,5 @@
{
"tasks": {
"dev": "deno run --watch main.ts"
}
}

21
demo/deno/deno.lock generated Normal file
View file

@ -0,0 +1,21 @@
{
"version": "3",
"packages": {
"specifiers": {
"jsr:@std/assert": "jsr:@std/assert@1.0.0",
"jsr:@std/internal@^1.0.1": "jsr:@std/internal@1.0.1"
},
"jsr": {
"@std/assert@1.0.0": {
"integrity": "0e4f6d873f7f35e2a1e6194ceee39686c996b9e5d134948e644d35d4c4df2008",
"dependencies": [
"jsr:@std/internal@^1.0.1"
]
},
"@std/internal@1.0.1": {
"integrity": "6f8c7544d06a11dd256c8d6ba54b11ed870aac6c5aeafff499892662c57673e6"
}
}
},
"remote": {}
}

8
demo/deno/main.ts Normal file
View file

@ -0,0 +1,8 @@
export function add(a: number, b: number): number {
return a + b;
}
// Learn more at https://deno.land/manual/examples/module_metadata#concepts
if (import.meta.main) {
console.log("Add 2 + 3 =", add(2, 3));
}

18
demo/deno/main_test.ts Normal file
View file

@ -0,0 +1,18 @@
import { assert, assertEquals } from "jsr:@std/assert";
import { add } from "./main.ts";
const throwFn = () => {
throw new Error("error");
};
Deno.test(function addTest() {
assertEquals(add(2, 3), 5);
});
Deno.test(function fail1() {
assertEquals(add(2, 5), 5);
});
Deno.test(function fail2() {
assert(throwFn());
});

36
demo/deno/output.txt Normal file
View file

@ -0,0 +1,36 @@
running 3 tests from ./main_test.ts
addTest ... ok (0ms)
fail1 ... FAILED (1ms)
fail1 ... FAILED (0ms)
 ERRORS 
fail1 => ./main_test.ts:12:6
error: AssertionError: Values are not equal.
[Diff] Actual / Expected
- 7
+ 5
throw new AssertionError(message);
 ^
at assertEquals (https://jsr.io/@std/assert/1.0.0/equals.ts:47:9)
at fail1 (file:///home/demo/test/dneo/main_test.ts:13:3)
fail1 => ./main_test.ts:16:6
error: Error: error
throw new Error("error");
 ^
at throwFn (file:///home/demo/test/dneo/main_test.ts:5:9)
at fail1 (file:///home/demo/test/dneo/main_test.ts:17:10)
 FAILURES 
fail1 => ./main_test.ts:12:6
fail1 => ./main_test.ts:16:6
FAILED | 1 passed | 2 failed (3ms)

3
demo/go/README.md Normal file
View file

@ -0,0 +1,3 @@
This directory is from https://github.com/nvim-neotest/neotest-go/tree/main/neotest_go.
LICENSE: https://github.com/nvim-neotest/neotest-go/blob/main/LICENSE.md

9
demo/go/cases.go Normal file
View file

@ -0,0 +1,9 @@
package main
func add(a, b int) int {
return a + b
}
func subtract(a, b int) int {
return a - b
}

49
demo/go/cases_test.go Normal file
View file

@ -0,0 +1,49 @@
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSubtract(t *testing.T) {
testCases := []struct {
desc string
a int
b int
want int
}{
{
desc: "test one",
a: 1,
b: 2,
want: 3,
},
{
desc: "test two",
a: 1,
b: 2,
want: 7,
},
}
for _, tC := range testCases {
t.Run(tC.desc, func(t *testing.T) {
assert.Equal(t, tC.want, subtract(tC.a, tC.b))
})
}
}
func TestAdd(t *testing.T) {
t.Run("test one", func(t *testing.T) {
assert.Equal(t, 3, add(1, 2))
})
t.Run("test two", func(t *testing.T) {
assert.Equal(t, 5, add(1, 2))
})
variable := "string"
t.Run(variable, func(t *testing.T) {
assert.Equal(t, 3, add(1, 2))
})
}

7
demo/go/example.go Normal file
View file

@ -0,0 +1,7 @@
package main
import "fmt"
func hello() {
fmt.Println("hello world")
}

15
demo/go/example_test.go Normal file
View file

@ -0,0 +1,15 @@
package main
func Example_hello_ok() {
hello()
// Output:
// hello world
}
func Example_hello_ng() {
hello()
// Output:
// NG pattern
}

11
demo/go/go.mod Normal file
View file

@ -0,0 +1,11 @@
module neotest_go
go 1.18
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/objx v0.4.0 // indirect
github.com/stretchr/testify v1.7.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

15
demo/go/go.sum Normal file
View file

@ -0,0 +1,15 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

15
demo/go/main.go Normal file
View file

@ -0,0 +1,15 @@
package main
import "fmt"
func main() {
fmt.Println("hello world")
}
func addOne(x int) int {
return x + 1
}
func addTwo(x int) int {
return x + 2
}

View file

@ -0,0 +1,14 @@
//go:build files
// +build files
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddOne2(t *testing.T) {
assert.Equal(t, 2, addOne(1))
}

15
demo/go/main_test.go Normal file
View file

@ -0,0 +1,15 @@
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAddOne(t *testing.T) {
assert.Equal(t, 2, addOne(1))
}
func TestAddTwo(t *testing.T) {
assert.Equal(t, 3, addTwo(1))
}

View file

@ -0,0 +1,32 @@
package main
import (
"fmt"
"net/http"
"testing"
)
func TestSomeTest(t *testing.T) {
tt := []struct {
name string
method string
url string
apiKey string
status int
}{
{name: "AccessDenied1", method: http.MethodGet, url: "/api/nothing", apiKey: "lalala", status: http.StatusForbidden},
{name: "AccessDenied2", method: http.MethodGet, url: "/api/nothing", apiKey: "lalala", status: http.StatusForbidden},
{name: "AccessDenied3", method: http.MethodGet, url: "/api/nothing", apiKey: "lalala", status: http.StatusForbidden},
{name: "AccessDenied4", method: http.MethodGet, url: "/api/nothing", apiKey: "lalala", status: http.StatusForbidden},
{name: "AccessDenied5", method: http.MethodGet, url: "/api/nothing", apiKey: "lalala", status: http.StatusForbidden},
{name: "AccessDenied6", method: http.MethodGet, url: "/api/nothing", apiKey: "lalala", status: http.StatusForbidden},
}
for _, tc := range tt {
tc := tc
t.Run(tc.name, func(_ *testing.T) {
fmt.Println(tc.name, tc.method, tc.url, tc.apiKey, tc.status)
})
}
}

40
demo/go/map_table_test.go Normal file
View file

@ -0,0 +1,40 @@
package main
import (
"reflect"
"strings"
"testing"
)
func TestSplit(t *testing.T) {
tests := map[string]struct {
input string
sep string
want []string
}{
"simple": {input: "a/b/c", sep: "/", want: []string{"a", "b", "c"}},
"wrong sep": {input: "a/b/c", sep: ",", want: []string{"a/b/c"}},
"no sep": {input: "abc", sep: "/", want: []string{"abc"}},
"trailing sep": {input: "a/b/c/", sep: "/", want: []string{"a", "b", "c"}},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
got := Split(tc.input, tc.sep)
if !reflect.DeepEqual(tc.want, got) {
t.Fatalf("%s: expected: %v, got: %v", name, tc.want, got)
}
})
}
}
func Split(s, sep string) []string {
var result []string
i := strings.Index(s, sep)
for i > -1 {
result = append(result, s[:i])
s = s[i+len(sep):]
i = strings.Index(s, sep)
}
return append(result, s)
}

39
demo/go/suite_test.go Normal file
View file

@ -0,0 +1,39 @@
package main
// Basic imports
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// Define the suite, and absorb the built-in basic suite
// functionality from testify - including a T() method which
// returns the current testing context
type ExampleTestSuite struct {
suite.Suite
VariableThatShouldStartAtFive int
}
// Make sure that VariableThatShouldStartAtFive is set to five
// before each test
func (suite *ExampleTestSuite) SetupTest() {
suite.VariableThatShouldStartAtFive = 5
}
// All methods that begin with "Test" are run as tests within a
// suite.
func (suite *ExampleTestSuite) TestExample() {
assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
}
func (suite *ExampleTestSuite) TestExampleFailure() {
assert.Equal(suite.T(), 5, 3)
}
// In order for 'go test' to run this suite, we need to create
// a normal test function and pass our suite to suite.Run
func TestExampleTestSuite(t *testing.T) {
suite.Run(t, new(ExampleTestSuite))
}

View file

@ -0,0 +1,25 @@
package main
import "testing"
func TestOdd(t *testing.T) {
t.Run("odd", func(t *testing.T) {
t.Run("5 is odd", func(t *testing.T) {
if 5%2 != 1 {
t.Error("5 is actually odd")
}
t.Run("9 is odd", func(t *testing.T) {
if 9%2 != 1 {
t.Error("5 is actually odd")
}
})
})
t.Run("7 is odd", func(t *testing.T) {
if 7%2 != 1 {
t.Error("7 is actually odd")
}
})
})
}

View file

@ -1,4 +1,4 @@
# test_proj
# demo
To install dependencies:

13
demo/jest/another.spec.js Normal file
View file

@ -0,0 +1,13 @@
describe("another", () => {
it("fail", () => {
expect(1).toBe(0);
});
it("pass", () => {
expect(1).toBe(1);
});
});
test("toplevel test", () => {
expect(1).toBe(2);
});

View file

@ -32,9 +32,7 @@
{
"assertionResults": [
{
"ancestorTitles": [
"index"
],
"ancestorTitles": ["index"],
"duration": 3,
"failureDetails": [
{
@ -48,7 +46,7 @@
}
],
"failureMessages": [
"Error: \u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n\nExpected: \u001b[32m0\u001b[39m\nReceived: \u001b[31m1\u001b[39m\n at Object.toBe (/absolute_path/test_proj/jest/index.spec.js:4:15)\n at Promise.then.completed (/absolute_path/test_proj/jest/node_modules/jest-circus/build/utils.js:298:28)\n at new Promise (<anonymous>)\n at callAsyncCircusFn (/absolute_path/test_proj/jest/node_modules/jest-circus/build/utils.js:231:10)\n at _callCircusTest (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:316:40)\n at _runTest (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:252:3)\n at _runTestsForDescribeBlock (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:126:9)\n at _runTestsForDescribeBlock (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:121:9)\n at run (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:71:3)\n at runAndTransformResultsToJestFormat (/absolute_path/test_proj/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapterInit.js:122:21)\n at jestAdapter (/absolute_path/test_proj/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapter.js:79:19)\n at runTestInternal (/absolute_path/test_proj/jest/node_modules/jest-runner/build/runTest.js:367:16)\n at runTest (/absolute_path/test_proj/jest/node_modules/jest-runner/build/runTest.js:444:34)"
"Error: \u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n\nExpected: \u001b[32m0\u001b[39m\nReceived: \u001b[31m1\u001b[39m\n at Object.toBe (/absolute_path/demo/jest/index.spec.js:4:15)\n at Promise.then.completed (/absolute_path/demo/jest/node_modules/jest-circus/build/utils.js:298:28)\n at new Promise (<anonymous>)\n at callAsyncCircusFn (/absolute_path/demo/jest/node_modules/jest-circus/build/utils.js:231:10)\n at _callCircusTest (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:316:40)\n at _runTest (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:252:3)\n at _runTestsForDescribeBlock (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:126:9)\n at _runTestsForDescribeBlock (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:121:9)\n at run (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:71:3)\n at runAndTransformResultsToJestFormat (/absolute_path/demo/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapterInit.js:122:21)\n at jestAdapter (/absolute_path/demo/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapter.js:79:19)\n at runTestInternal (/absolute_path/demo/jest/node_modules/jest-runner/build/runTest.js:367:16)\n at runTest (/absolute_path/demo/jest/node_modules/jest-runner/build/runTest.js:444:34)"
],
"fullName": "index fail",
"invocations": 1,
@ -64,7 +62,7 @@
],
"endTime": 1714484637874,
"message": "\u001b[1m\u001b[31m \u001b[1m● \u001b[22m\u001b[1mindex fail\u001b[39m\u001b[22m\n\n \u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n\n Expected: \u001b[32m0\u001b[39m\n Received: \u001b[31m1\u001b[39m\n\u001b[2m\u001b[22m\n\u001b[2m \u001b[0m \u001b[90m 2 |\u001b[39m\u001b[22m\n\u001b[2m \u001b[90m 3 |\u001b[39m it(\u001b[32m\"fail\"\u001b[39m\u001b[33m,\u001b[39m () \u001b[33m=>\u001b[39m {\u001b[22m\n\u001b[2m \u001b[31m\u001b[1m>\u001b[22m\u001b[2m\u001b[39m\u001b[90m 4 |\u001b[39m expect(\u001b[35m1\u001b[39m)\u001b[33m.\u001b[39mtoBe(\u001b[35m0\u001b[39m)\u001b[22m\n\u001b[2m \u001b[90m |\u001b[39m \u001b[31m\u001b[1m^\u001b[22m\u001b[2m\u001b[39m\u001b[22m\n\u001b[2m \u001b[90m 5 |\u001b[39m })\u001b[22m\n\u001b[2m \u001b[90m 6 |\u001b[39m\u001b[22m\n\u001b[2m \u001b[90m 7 |\u001b[39m })\u001b[0m\u001b[22m\n\u001b[2m\u001b[22m\n\u001b[2m \u001b[2mat Object.toBe (\u001b[22m\u001b[2m\u001b[0m\u001b[36mindex.spec.js\u001b[39m\u001b[0m\u001b[2m:4:15)\u001b[22m\u001b[2m\u001b[22m\n",
"name": "/absolute_path/test_proj/jest/index.spec.js",
"name": "/absolute_path/demo/jest/index.spec.js",
"startTime": 1714484637684,
"status": "failed",
"summary": ""
@ -72,9 +70,7 @@
{
"assertionResults": [
{
"ancestorTitles": [
"another"
],
"ancestorTitles": ["another"],
"duration": 2,
"failureDetails": [
{
@ -88,7 +84,7 @@
}
],
"failureMessages": [
"Error: \u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n\nExpected: \u001b[32m0\u001b[39m\nReceived: \u001b[31m1\u001b[39m\n at Object.toBe (/absolute_path/test_proj/jest/another.spec.js:4:15)\n at Promise.then.completed (/absolute_path/test_proj/jest/node_modules/jest-circus/build/utils.js:298:28)\n at new Promise (<anonymous>)\n at callAsyncCircusFn (/absolute_path/test_proj/jest/node_modules/jest-circus/build/utils.js:231:10)\n at _callCircusTest (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:316:40)\n at _runTest (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:252:3)\n at _runTestsForDescribeBlock (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:126:9)\n at _runTestsForDescribeBlock (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:121:9)\n at run (/absolute_path/test_proj/jest/node_modules/jest-circus/build/run.js:71:3)\n at runAndTransformResultsToJestFormat (/absolute_path/test_proj/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapterInit.js:122:21)\n at jestAdapter (/absolute_path/test_proj/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapter.js:79:19)\n at runTestInternal (/absolute_path/test_proj/jest/node_modules/jest-runner/build/runTest.js:367:16)\n at runTest (/absolute_path/test_proj/jest/node_modules/jest-runner/build/runTest.js:444:34)"
"Error: \u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n\nExpected: \u001b[32m0\u001b[39m\nReceived: \u001b[31m1\u001b[39m\n at Object.toBe (/absolute_path/demo/jest/another.spec.js:4:15)\n at Promise.then.completed (/absolute_path/demo/jest/node_modules/jest-circus/build/utils.js:298:28)\n at new Promise (<anonymous>)\n at callAsyncCircusFn (/absolute_path/demo/jest/node_modules/jest-circus/build/utils.js:231:10)\n at _callCircusTest (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:316:40)\n at _runTest (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:252:3)\n at _runTestsForDescribeBlock (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:126:9)\n at _runTestsForDescribeBlock (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:121:9)\n at run (/absolute_path/demo/jest/node_modules/jest-circus/build/run.js:71:3)\n at runAndTransformResultsToJestFormat (/absolute_path/demo/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapterInit.js:122:21)\n at jestAdapter (/absolute_path/demo/jest/node_modules/jest-circus/build/legacy-code-todo-rewrite/jestAdapter.js:79:19)\n at runTestInternal (/absolute_path/demo/jest/node_modules/jest-runner/build/runTest.js:367:16)\n at runTest (/absolute_path/demo/jest/node_modules/jest-runner/build/runTest.js:444:34)"
],
"fullName": "another fail",
"invocations": 1,
@ -102,9 +98,7 @@
"title": "fail"
},
{
"ancestorTitles": [
"another"
],
"ancestorTitles": ["another"],
"duration": 1,
"failureDetails": [],
"failureMessages": [],
@ -122,7 +116,7 @@
],
"endTime": 1714484637974,
"message": "\u001b[1m\u001b[31m \u001b[1m● \u001b[22m\u001b[1manother fail\u001b[39m\u001b[22m\n\n \u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n\n Expected: \u001b[32m0\u001b[39m\n Received: \u001b[31m1\u001b[39m\n\u001b[2m\u001b[22m\n\u001b[2m \u001b[0m \u001b[90m 2 |\u001b[39m\u001b[22m\n\u001b[2m \u001b[90m 3 |\u001b[39m it(\u001b[32m\"fail\"\u001b[39m\u001b[33m,\u001b[39m () \u001b[33m=>\u001b[39m {\u001b[22m\n\u001b[2m \u001b[31m\u001b[1m>\u001b[22m\u001b[2m\u001b[39m\u001b[90m 4 |\u001b[39m expect(\u001b[35m1\u001b[39m)\u001b[33m.\u001b[39mtoBe(\u001b[35m0\u001b[39m)\u001b[22m\n\u001b[2m \u001b[90m |\u001b[39m \u001b[31m\u001b[1m^\u001b[22m\u001b[2m\u001b[39m\u001b[22m\n\u001b[2m \u001b[90m 5 |\u001b[39m })\u001b[22m\n\u001b[2m \u001b[90m 6 |\u001b[39m\u001b[22m\n\u001b[2m \u001b[90m 7 |\u001b[39m it(\u001b[32m\"pass\"\u001b[39m\u001b[33m,\u001b[39m () \u001b[33m=>\u001b[39m {\u001b[0m\u001b[22m\n\u001b[2m\u001b[22m\n\u001b[2m \u001b[2mat Object.toBe (\u001b[22m\u001b[2m\u001b[0m\u001b[36manother.spec.js\u001b[39m\u001b[0m\u001b[2m:4:15)\u001b[22m\u001b[2m\u001b[22m\n",
"name": "/absolute_path/test_proj/jest/another.spec.js",
"name": "/absolute_path/demo/jest/another.spec.js",
"startTime": 1714484637879,
"status": "failed",
"summary": ""

View file

@ -1,5 +1,5 @@
{
"name": "test_proj",
"name": "demo",
"module": "index.js",
"type": "module",
"devDependencies": {
@ -10,4 +10,4 @@
"peerDependencies": {
"typescript": "^5.0.0"
}
}
}

View file

@ -0,0 +1,173 @@
const test = require("node:test");
const { describe, it } = require("node:test");
const assert = require("node:assert");
const { throwError } = require("./util.js");
// # Basic example
test("synchronous passing test", (t) => {
// This test passes because it does not throw an exception.
assert.strictEqual(1, 1);
});
test("synchronous failing test", (t) => {
// This test fails because it throws an exception.
assert.strictEqual(1, 2);
});
test("asynchronous passing test", async (t) => {
// This test passes because the Promise returned by the async
// function is settled and not rejected.
assert.strictEqual(1, 1);
});
test("asynchronous failing test", async (t) => {
// This test fails because the Promise returned by the async
// function is rejected.
assert.strictEqual(1, 2);
});
test("failing test using Promises", (t) => {
// Promises can be used directly as well.
return new Promise((resolve, reject) => {
setImmediate(() => {
reject(new Error("this will cause the test to fail"));
});
});
});
test("callback passing test", (t, done) => {
// done() is the callback function. When the setImmediate() runs, it invokes
// done() with no arguments.
setImmediate(done);
});
test("callback failing test", (t, done) => {
// When the setImmediate() runs, done() is invoked with an Error object and
// the test fails.
setImmediate(() => {
done(new Error("callback failure"));
});
});
// # Subtests
test("top level test", async (t) => {
await t.test("subtest 1", (t) => {
assert.strictEqual(1, 1);
});
await t.test("subtest 2", (t) => {
assert.strictEqual(2, 2);
});
});
// # Skipping tests
// The skip option is used, but no message is provided.
test("skip option", { skip: true }, (t) => {
// This code is never executed.
});
// The skip option is used, and a message is provided.
test("skip option with message", { skip: "this is skipped" }, (t) => {
// This code is never executed.
});
test("skip() method", (t) => {
// Make sure to return here as well if the test contains additional logic.
t.skip();
});
test("skip() method with message", (t) => {
// Make sure to return here as well if the test contains additional logic.
t.skip("this is skipped");
});
// # TODO tests
// The todo option is used, but no message is provided.
test("todo option", { todo: true }, (t) => {
// This code is executed, but not treated as a failure.
throw new Error("this does not fail the test");
});
// The todo option is used, and a message is provided.
test("todo option with message", { todo: "this is a todo test" }, (t) => {
// This code is executed.
});
test("todo() method", (t) => {
t.todo();
});
test("todo() method with message", (t) => {
t.todo("this is a todo test and is not treated as a failure");
throw new Error("this does not fail the test");
});
// # describe() and it() aliases
describe("A thing", () => {
it("should work", () => {
assert.strictEqual(1, 1);
});
it("should be ok", () => {
assert.strictEqual(2, 2);
});
describe("a nested thing", () => {
it("should work", () => {
assert.strictEqual(3, 3);
});
});
});
// # only tests
// Assume Node.js is run with the --test-only command-line option.
// The suite's 'only' option is set, so these tests are run.
test("only: this test is run", { only: true }, async (t) => {
// Within this test, all subtests are run by default.
await t.test("running subtest");
// The test context can be updated to run subtests with the 'only' option.
t.runOnly(true);
await t.test("this subtest is now skipped");
await t.test("this subtest is run", { only: true });
// Switch the context back to execute all tests.
t.runOnly(false);
await t.test("this subtest is now run");
// Explicitly do not run these tests.
await t.test("skipped subtest 3", { only: false });
await t.test("skipped subtest 4", { skip: true });
});
// The 'only' option is not set, so this test is skipped.
test("only: this test is not run", () => {
// This code is not run.
throw new Error("fail");
});
describe("A suite", () => {
// The 'only' option is set, so this test is run.
it("this test is run A ", { only: true }, () => {
// This code is run.
});
it("this test is not run B", () => {
// This code is not run.
throw new Error("fail");
});
});
describe.only("B suite", () => {
// The 'only' option is set, so this test is run.
it("this test is run C", () => {
// This code is run.
});
it("this test is run D", () => {
// This code is run.
});
});
test("import from external file. this must be fail", () => {
throwError();
});

229
demo/node-test/output.xml Normal file
View file

@ -0,0 +1,229 @@
<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testcase name="synchronous passing test" time="0.000819" classname="test"/>
<testcase name="synchronous failing test" time="0.001318" classname="test" failure="Expected values to be strictly equal:1 !== 2">
<failure type="testCodeFailure" message="Expected values to be strictly equal:1 !== 2">
[Error [ERR_TEST_FAILURE]: Expected values to be strictly equal:
1 !== 2
] {
failureType: 'testCodeFailure',
cause: AssertionError [ERR_ASSERTION]: Expected values to be strictly equal:
1 !== 2
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:13:10)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Test.processPendingSubtests (node:internal/test_runner/test:374:18)
at Test.postRun (node:internal/test_runner/test:715:19)
at Test.run (node:internal/test_runner/test:673:12)
at async startSubtest (node:internal/test_runner/harness:214:3) {
generatedMessage: true,
code: 'ERR_ASSERTION',
actual: 1,
expected: 2,
operator: 'strictEqual'
},
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testcase name="asynchronous passing test" time="0.000764" classname="test"/>
<testcase name="asynchronous failing test" time="0.000411" classname="test" failure="Expected values to be strictly equal:1 !== 2">
<failure type="testCodeFailure" message="Expected values to be strictly equal:1 !== 2">
[Error [ERR_TEST_FAILURE]: Expected values to be strictly equal:
1 !== 2
] {
failureType: 'testCodeFailure',
cause: AssertionError [ERR_ASSERTION]: Expected values to be strictly equal:
1 !== 2
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:25:10)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Test.processPendingSubtests (node:internal/test_runner/test:374:18)
at Test.postRun (node:internal/test_runner/test:715:19)
at Test.run (node:internal/test_runner/test:673:12)
at async Test.processPendingSubtests (node:internal/test_runner/test:374:7) {
generatedMessage: true,
code: 'ERR_ASSERTION',
actual: 1,
expected: 2,
operator: 'strictEqual'
},
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testcase name="failing test using Promises" time="0.005315" classname="test" failure="this will cause the test to fail">
<failure type="testCodeFailure" message="this will cause the test to fail">
[Error [ERR_TEST_FAILURE]: this will cause the test to fail] {
failureType: 'testCodeFailure',
cause: Error: this will cause the test to fail
at Immediate.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:32:14)
at process.processImmediate (node:internal/timers:476:21),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testcase name="callback passing test" time="0.000442" classname="test"/>
<testcase name="callback failing test" time="0.000283" classname="test" failure="callback failure">
<failure type="testCodeFailure" message="callback failure">
[Error [ERR_TEST_FAILURE]: callback failure] {
failureType: 'testCodeFailure',
cause: Error: callback failure
at Immediate.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:47:10)
at process.processImmediate (node:internal/timers:476:21),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testsuite name="top level test" time="0.000468" disabled="0" errors="0" tests="2" failures="0" skipped="0" hostname="kbwo-21cbcto1ww">
<testcase name="subtest 1" time="0.000136" classname="test"/>
<testcase name="subtest 2" time="0.000058" classname="test"/>
</testsuite>
<testcase name="skip option" time="0.000050" classname="test">
<skipped type="skipped" message="true"/>
</testcase>
<testcase name="skip option with message" time="0.000038" classname="test">
<skipped type="skipped" message="this is skipped"/>
</testcase>
<testcase name="skip() method" time="0.000052" classname="test">
<skipped type="skipped" message="true"/>
</testcase>
<testcase name="skip() method with message" time="0.000043" classname="test">
<skipped type="skipped" message="this is skipped"/>
</testcase>
<testcase name="todo option" time="0.000053" classname="test" failure="this does not fail the test">
<skipped type="todo" message="true"/>
<failure type="testCodeFailure" message="this does not fail the test">
[Error [ERR_TEST_FAILURE]: this does not fail the test] {
failureType: 'testCodeFailure',
cause: Error: this does not fail the test
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:87:9)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Test.processPendingSubtests (node:internal/test_runner/test:374:18)
at Test.postRun (node:internal/test_runner/test:715:19)
at Test.run (node:internal/test_runner/test:673:12)
at async Test.processPendingSubtests (node:internal/test_runner/test:374:7),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testcase name="todo option with message" time="0.000040" classname="test">
<skipped type="todo" message="this is a todo test"/>
</testcase>
<testcase name="todo() method" time="0.000134" classname="test">
<skipped type="todo" message="true"/>
</testcase>
<testcase name="todo() method with message" time="0.000067" classname="test" failure="this does not fail the test">
<skipped type="todo" message="this is a todo test and is not treated as a failure"/>
<failure type="testCodeFailure" message="this does not fail the test">
[Error [ERR_TEST_FAILURE]: this does not fail the test] {
failureType: 'testCodeFailure',
cause: Error: this does not fail the test
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:101:9)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Test.processPendingSubtests (node:internal/test_runner/test:374:18)
at Test.postRun (node:internal/test_runner/test:715:19)
at Test.run (node:internal/test_runner/test:673:12)
at async Test.processPendingSubtests (node:internal/test_runner/test:374:7),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testsuite name="A thing" time="0.000482" disabled="0" errors="0" tests="3" failures="0" skipped="0" hostname="kbwo-21cbcto1ww">
<testcase name="should work" time="0.000108" classname="test"/>
<testcase name="should be ok" time="0.000041" classname="test"/>
<testsuite name="a nested thing" time="0.000099" disabled="0" errors="0" tests="1" failures="0" skipped="0" hostname="kbwo-21cbcto1ww">
<testcase name="should work" time="0.000046" classname="test"/>
</testsuite>
</testsuite>
<testsuite name="this test is run" time="0.000507" disabled="0" errors="0" tests="6" failures="0" skipped="1" hostname="kbwo-21cbcto1ww">
<testcase name="running subtest" time="0.000051" classname="test"/>
<testcase name="this subtest is now skipped" time="0.000025" classname="test"/>
<!-- 'only' and 'runOnly' require the &#45;&#45;test-only command-line option. -->
<testcase name="this subtest is run" time="0.000023" classname="test"/>
<!-- 'only' and 'runOnly' require the &#45;&#45;test-only command-line option. -->
<testcase name="this subtest is now run" time="0.000022" classname="test"/>
<testcase name="skipped subtest 3" time="0.000019" classname="test"/>
<!-- 'only' and 'runOnly' require the &#45;&#45;test-only command-line option. -->
<testcase name="skipped subtest 4" time="0.000029" classname="test">
<skipped type="skipped" message="true"/>
</testcase>
</testsuite>
<!-- 'only' and 'runOnly' require the &#45;&#45;test-only command-line option. -->
<testcase name="this test is not run" time="0.000057" classname="test" failure="fail">
<failure type="testCodeFailure" message="fail">
[Error [ERR_TEST_FAILURE]: fail] {
failureType: 'testCodeFailure',
cause: Error: fail
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:145:9)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Test.processPendingSubtests (node:internal/test_runner/test:374:18)
at Test.postRun (node:internal/test_runner/test:715:19)
at Test.run (node:internal/test_runner/test:673:12)
at async Test.processPendingSubtests (node:internal/test_runner/test:374:7),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<testsuite name="a suite" time="0.000167" disabled="0" errors="0" tests="2" failures="1" skipped="0" hostname="kbwo-21cbcto1ww">
<testcase name="this test is run" time="0.000044" classname="test"/>
<!-- 'only' and 'runOnly' require the &#45;&#45;test-only command-line option. -->
<testcase name="this test is not run" time="0.000040" classname="test" failure="fail">
<failure type="testCodeFailure" message="fail">
[Error [ERR_TEST_FAILURE]: fail] {
failureType: 'testCodeFailure',
cause: Error: fail
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:156:11)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Suite.processPendingSubtests (node:internal/test_runner/test:374:18)
at Test.postRun (node:internal/test_runner/test:715:19)
at Test.run (node:internal/test_runner/test:673:12)
at async Promise.all (index 0)
at async Suite.run (node:internal/test_runner/test:948:7)
at async Test.processPendingSubtests (node:internal/test_runner/test:374:7),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
</testsuite>
<testsuite name="a suite" time="0.000180" disabled="0" errors="0" tests="2" failures="0" skipped="0" hostname="kbwo-21cbcto1ww">
<testcase name="this test is run" time="0.000038" classname="test"/>
<testcase name="this test is run" time="0.000028" classname="test"/>
</testsuite>
<!-- 'only' and 'runOnly' require the &#45;&#45;test-only command-line option. -->
<testcase name="must be fail" time="0.000055" classname="test" failure="this will cause the test to fail">
<failure type="testCodeFailure" message="this will cause the test to fail">
[Error [ERR_TEST_FAILURE]: this will cause the test to fail] {
failureType: 'testCodeFailure',
cause: Error: this will cause the test to fail
at throwError (/home/test-user/projects/testing-language-server/demo/node-test/util.js:2:9)
at TestContext.&lt;anonymous> (/home/test-user/projects/testing-language-server/demo/node-test/index.test.js:172:3)
at Test.runInAsyncScope (node:async_hooks:203:9)
at Test.run (node:internal/test_runner/test:631:25)
at Test.processPendingSubtests (node:internal/test_runner/test:374:18)
at Suite.postRun (node:internal/test_runner/test:715:19)
at Suite.run (node:internal/test_runner/test:962:10)
at async Test.processPendingSubtests (node:internal/test_runner/test:374:7),
code: 'ERR_TEST_FAILURE'
}
</failure>
</testcase>
<!-- tests 34 -->
<!-- suites 4 -->
<!-- pass 18 -->
<!-- fail 7 -->
<!-- cancelled 0 -->
<!-- skipped 5 -->
<!-- todo 4 -->
<!-- duration_ms 65.919879 -->
</testsuites>

View file

@ -0,0 +1,12 @@
{
"name": "test",
"version": "1.0.0",
"main": "index.test.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": ""
}

5
demo/node-test/util.js Normal file
View file

@ -0,0 +1,5 @@
// Shared helper for the node:test demo: unconditionally throws so that
// importing tests can exercise failure diagnostics.
function throwError() {
  throw new Error("this will cause the test to fail");
}

module.exports = { throwError };

1
demo/phpunit/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
vendor

2
demo/phpunit/.mise.toml Normal file
View file

@ -0,0 +1,2 @@
[tools]
php = "8.3"

View file

@ -0,0 +1,17 @@
{
"name": "kbwo/phpunit",
"autoload": {
"psr-4": {
"App\\": "src/"
}
},
"authors": [
{
"name": "kbwo",
"email": "kabaaa1126@gmail.com"
}
],
"require-dev": {
"phpunit/phpunit": "^11.3"
}
}

1651
demo/phpunit/composer.lock generated Normal file

File diff suppressed because it is too large Load diff

15
demo/phpunit/output.xml Normal file
View file

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuites>
<testsuite name="CLI Arguments" tests="3" assertions="3" errors="0" failures="1" skipped="0" time="0.002791">
<testsuite name="Tests\CalculatorTest" file="/home/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php" tests="3" assertions="3" errors="0" failures="1" skipped="0" time="0.002791">
<testcase name="testAdd" file="/home/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php" line="10" class="Tests\CalculatorTest" classname="Tests.CalculatorTest" assertions="1" time="0.000695"/>
<testcase name="testSubtract" file="/home/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php" line="17" class="Tests\CalculatorTest" classname="Tests.CalculatorTest" assertions="1" time="0.000046"/>
<testcase name="testFail1" file="/home/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php" line="24" class="Tests\CalculatorTest" classname="Tests.CalculatorTest" assertions="1" time="0.002051">
<failure type="PHPUnit\Framework\ExpectationFailedException">Tests\CalculatorTest::testFail1
Failed asserting that 8 matches expected 1.
/home/kbwo/testing-language-server/demo/phpunit/src/CalculatorTest.php:28</failure>
</testcase>
</testsuite>
</testsuite>
</testsuites>

View file

@ -0,0 +1,16 @@
<?php
namespace App;

/**
 * Minimal arithmetic helper exercised by the PHPUnit demo tests.
 */
class Calculator
{
    /**
     * Returns the sum of $a and $b.
     */
    public function add($a, $b)
    {
        $sum = $a + $b;
        return $sum;
    }

    /**
     * Returns $a minus $b.
     */
    public function subtract($a, $b)
    {
        $difference = $a - $b;
        return $difference;
    }
}

View file

@ -0,0 +1,30 @@
<?php
namespace Tests;
use App\Calculator;
use PHPUnit\Framework\TestCase;
class CalculatorTest extends TestCase // NOTE: line numbers of these methods are recorded in demo/phpunit/output.xml
{
    public function testAdd() // passes: 2 + 3 == 5
    {
        $calculator = new Calculator();
        $result = $calculator->add(2, 3);
        $this->assertEquals(5, $result);
    }
    public function testSubtract() // passes: 5 - 3 == 2
    {
        $calculator = new Calculator();
        $result = $calculator->subtract(5, 3);
        $this->assertEquals(2, $result);
    }
    public function testFail1() // intentionally fails: subtract(10, 2) is 8, assertion expects 1
    {
        $calculator = new Calculator();
        $result = $calculator->subtract(10, 2);
        $this->assertEquals(1, $result);
    }
}

View file

@ -62,6 +62,13 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "demo"
version = "0.1.0"
dependencies = [
"tokio",
]
[[package]]
name = "gimli"
version = "0.28.1"
@ -239,13 +246,6 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "test_proj"
version = "0.1.0"
dependencies = [
"tokio",
]
[[package]]
name = "tokio"
version = "1.37.0"

View file

@ -1,5 +1,5 @@
[package]
name = "test_proj"
name = "demo"
version = "0.1.0"
edition = "2021"

65
demo/rust/src/lib.rs Normal file
View file

@ -0,0 +1,65 @@
// Demo fixture: a mix of passing, failing, and panicking tests across
// nested modules so the test adapter can be exercised at several
// namespace depths.
fn hello() {
    println!("Hello, world!");
}
#[cfg(test)]
mod tests {
    // No #[test] attribute — must not be picked up as a test.
    fn not_test() {}
    #[test]
    fn success() {
        assert!(true);
    }
    // Intentionally failing test.
    #[test]
    fn fail() {
        assert!(false);
    }
    #[tokio::test]
    async fn tokio_test_success() {
        assert!(true);
    }
    // Intentionally failing async test.
    #[tokio::test]
    async fn tokio_test_fail() {
        assert!(false);
    }
    mod nested_namespace {
        // No #[test] attribute — must not be picked up as a test.
        fn not_test() {}
        #[test]
        fn success() {
            assert!(true);
        }
        // Intentionally failing test, one module deep.
        #[test]
        fn fail() {
            assert!(false);
        }
        mod nested_nested_namespace {
            // No #[test] attribute — must not be picked up as a test.
            fn not_test() {}
            #[test]
            fn success() {
                assert!(true);
            }
            // Intentionally failing test, two modules deep.
            #[test]
            fn fail() {
                assert!(false);
            }
        }
    }
    // Helper that panics; called indirectly so the failure surfaces
    // from a non-assert panic path.
    fn p() {
        panic!("test failed");
    }
    #[test]
    fn test_panic() {
        p();
    }
}

2
demo/vitest/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
node_modules
.yarn

12
demo/vitest/basic.test.ts Normal file
View file

@ -0,0 +1,12 @@
import assert from "assert";
import { describe, test } from "vitest";
// Demo fixture for the vitest adapter.
describe("describe text", () => {
  // NOTE(review): this test is named "pass" but asserts false, so it
  // fails just like its sibling — confirm whether it should be
  // assert(true) or whether two failures are intended for the demo.
  test("pass", async () => {
    assert(false);
  });
  // Intentionally failing test.
  test("fail", async () => {
    assert(false);
  });
});

16
demo/vitest/package.json Normal file
View file

@ -0,0 +1,16 @@
{
"name": "spec",
"version": "0.0.1",
"description": "neotest-vitest spec",
"main": "index.js",
"license": "MIT",
"dependencies": {
"ts-node": "^10.8.2",
"typescript": "^4.7.4"
},
"devDependencies": {
"@types/node": "^18.0.3",
"vite": "^3.0.9",
"vitest": "^0.22.1"
}
}

View file

@ -0,0 +1,13 @@
/// <reference types="vitest" />
// Configure Vitest (https://vitest.dev/config/)
import { defineConfig } from 'vite'
// Minimal config: every `test` option is left at its default.
export default defineConfig({
  test: {
    /* for example, use global to avoid globals imports (describe, test, expect): */
    // globals: true,
  },
})

49
doc/ADAPTER_SPEC.md Normal file
View file

@ -0,0 +1,49 @@
# Adapter Specifications
This document outlines the command specifications.
# Commands
These commands must be implemented by the adapter.
- **discover**: Discovers the tests contained in the given files.
- **run-file-test**: Executes tests on specified files.
- **detect-workspace**: Identifies the workspace based on provided parameters.
## discover
### Arguments
- `file_paths`: A list of file paths to be processed.
### Stdout
Returns a JSON array of discovered items. Each item is a JSON object containing:
- `path`: String representing the file path.
- `tests`: Array of test items, where each test item is a JSON object including:
- `id`: String identifier for the test.
- `name`: String name of the test.
- `start_position`: [Range](https://docs.rs/lsp-types/latest/lsp_types/struct.Range.html) indicating the start position of the test in the file.
- `end_position`: [Range](https://docs.rs/lsp-types/latest/lsp_types/struct.Range.html) indicating the end position of the test in the file.
## run-file-test
### Arguments
- `file_paths`: A list of file paths to be tested.
- `workspace`: The workspace identifier where the tests will be executed.
### Stdout
Returns a JSON array of test results. Each result is a JSON object containing:
- `path`: String representing the file path.
- `diagnostics`: Array of [Diagnostic](https://docs.rs/lsp-types/latest/lsp_types/struct.Diagnostic.html) objects.
## detect-workspace
### Arguments
- `file_paths`: A list of file paths to identify the workspace.
### Stdout
Returns a JSON object where:
- Keys are strings representing workspace file paths.
- Values are arrays of strings representing file paths associated with each workspace.
> **Note:** All stdout emitted by these commands must be valid JSON, parseable by standard JSON parsers.

View file

@ -1,46 +0,0 @@
use std::collections::HashMap;
use std::str::FromStr;
use strum::{AsRefStr, Display};
use once_cell::sync::Lazy;
type Extension<'a> = &'a str;
/// File types the server can dispatch adapters for.
///
/// The `strum` serializations are the lowercase identifiers matched by
/// `FromStr` below — presumably the LSP `languageId` strings; confirm
/// against callers.
#[derive(Display, AsRefStr, Eq, PartialEq, Hash)]
pub enum AvailableFileType {
    #[strum(serialize = "rust")]
    Rust,
    #[strum(serialize = "javascript")]
    Javascript,
    #[strum(serialize = "javascriptreact")]
    JavascriptReact,
    #[strum(serialize = "typescript")]
    Typescript,
    #[strum(serialize = "typescriptreact")]
    TypescriptReact,
}
impl FromStr for AvailableFileType {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"rust" => Ok(AvailableFileType::Rust),
"javascript" => Ok(AvailableFileType::Javascript),
"javascriptreact" => Ok(AvailableFileType::JavascriptReact),
"typescript" => Ok(AvailableFileType::Typescript),
"typescriptreact" => Ok(AvailableFileType::TypescriptReact),
_ => Err(format!("Unknown file type: {}", s)),
}
}
}
/// Lazily-built table mapping each supported file type to the file
/// extensions it covers.
pub static LANGUAGE_ID_MAP: Lazy<HashMap<AvailableFileType, Vec<Extension>>> = Lazy::new(|| {
    HashMap::from([
        (AvailableFileType::Rust, vec!["rs"]),
        (AvailableFileType::Javascript, vec!["js", "jsx"]),
        (AvailableFileType::JavascriptReact, vec!["js", "jsx"]),
        (AvailableFileType::Typescript, vec!["ts", "tsx"]),
        (AvailableFileType::TypescriptReact, vec!["ts", "tsx"]),
    ])
});

View file

@ -1,6 +1,3 @@
pub mod error;
pub mod language;
pub mod log;
pub mod server;
pub mod spec;
pub mod util;

View file

@ -1,13 +1,28 @@
use crate::util::clean_old_logs;
use std::path::PathBuf;
use tracing_appender::non_blocking::WorkerGuard;
pub struct Log;
impl Log {
pub fn init() -> Result<WorkerGuard, anyhow::Error> {
fn log_dir() -> PathBuf {
let home_dir = dirs::home_dir().unwrap();
let log_path = home_dir.join(".config/testing_language_server/logs");
let file_appender = tracing_appender::rolling::daily(log_path, "prefix.log");
home_dir.join(".config/testing_language_server/logs")
}
pub fn init() -> Result<WorkerGuard, anyhow::Error> {
let log_dir_path = Self::log_dir();
let prefix = "server.log";
let file_appender = tracing_appender::rolling::daily(&log_dir_path, prefix);
let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
clean_old_logs(
log_dir_path.to_str().unwrap(),
30,
&format!("{prefix}.*"),
&format!("{prefix}."),
)
.unwrap();
tracing_subscriber::fmt().with_writer(non_blocking).init();
Ok(guard)
}

View file

@ -1,10 +1,158 @@
use testing_language_server::log::Log;
use testing_language_server::server::TestingLS;
mod error;
mod log;
mod server;
mod spec;
mod util;
use std::io::{self, BufRead, Read};
use error::LSError;
use lsp_types::InitializeParams;
use serde::de::Error;
use serde::Deserialize;
use serde_json::{json, Value};
use util::{format_uri, send_stdout};
use crate::log::Log;
use crate::server::TestingLS;
use crate::util::send_error;
fn extract_textdocument_uri(params: &Value) -> Result<String, serde_json::Error> {
let uri = params["textDocument"]["uri"]
.as_str()
.ok_or(serde_json::Error::custom("`textDocument.uri` is not set"))?;
Ok(format_uri(uri))
}
fn extract_uri(params: &Value) -> Result<String, serde_json::Error> {
let uri = params["uri"]
.as_str()
.ok_or(serde_json::Error::custom("`uri` is not set"))?;
Ok(format_uri(uri))
}
/// Reads LSP messages from stdin and dispatches them to `server` until
/// the client sends `exit`.
///
/// Framing follows the LSP base protocol: a block of `Name: value`
/// headers terminated by an empty `\r\n` line, then a JSON-RPC payload
/// of exactly `Content-Length` bytes.
fn main_loop(server: &mut TestingLS) -> Result<(), LSError> {
    // Workspace-wide diagnostics should run at most once at startup,
    // whether triggered by `initialized` or by the first `didOpen`.
    let mut is_workspace_checked = false;
    loop {
        let mut size = 0;
        'read_header: loop {
            let mut buffer = String::new();
            let stdin = io::stdin();
            let mut handle = stdin.lock();
            handle.read_line(&mut buffer)?;
            if buffer.is_empty() {
                tracing::warn!("buffer is empty")
            }
            // A bare "\r\n" marks the end of the header section.
            if buffer == "\r\n" {
                break 'read_header;
            }
            // Headers look like "Content-Length: 123\r\n". Split on the
            // first ':' so values containing spaces do not break
            // parsing, and take the payload size only from
            // Content-Length. (The previous code parsed every header's
            // value as a number, which panics on e.g. Content-Type.)
            match buffer.split_once(':') {
                Some((name, value)) if name.trim().eq_ignore_ascii_case("content-length") => {
                    size = value.trim().parse::<usize>().unwrap();
                }
                Some(_) => {
                    // Other headers (e.g. Content-Type) are ignored.
                }
                None => {
                    tracing::warn!("unexpected");
                }
            }
        }
        // Read exactly `size` bytes of JSON-RPC payload.
        let stdin = io::stdin();
        let mut handle = stdin.lock();
        let mut buf = vec![0u8; size];
        handle.read_exact(&mut buf).unwrap();
        let message = String::from_utf8(buf).unwrap();
        let received_json: Value = serde_json::from_str(&message)?;
        tracing::info!("received json={:#?}", received_json);
        let method = &received_json["method"].as_str();
        let params = &received_json["params"];
        if let Some(method) = method {
            match *method {
                "$/cancelRequest" => {}
                "initialized" => {
                    is_workspace_checked = true;
                    server.diagnose_workspace()?;
                }
                "initialize" => {
                    let initialize_params = InitializeParams::deserialize(params)?;
                    let id = received_json["id"].as_i64().unwrap();
                    server.initialize(id, initialize_params)?;
                }
                "shutdown" => {
                    let id = received_json["id"].as_i64().unwrap();
                    server.shutdown(id)?;
                }
                "exit" => {
                    std::process::exit(0);
                }
                "workspace/diagnostic" => {
                    is_workspace_checked = true;
                    server.diagnose_workspace()?;
                }
                "textDocument/diagnostic" | "textDocument/didSave" => {
                    let uri = extract_textdocument_uri(params)?;
                    server.check_file(&uri, false)?;
                }
                "textDocument/didOpen" => {
                    if !is_workspace_checked {
                        is_workspace_checked = true;
                        server.diagnose_workspace()?;
                    }
                    let uri = extract_textdocument_uri(params)?;
                    if server.refreshing_needed(&uri) {
                        server.refresh_workspaces_cache()?;
                    }
                }
                "$/runFileTest" => {
                    let uri = extract_uri(params)?;
                    server.check_file(&uri, false)?;
                }
                "$/runWorkspaceTest" => {
                    server.diagnose_workspace()?;
                }
                "$/discoverFileTest" => {
                    let id = received_json["id"].as_i64().unwrap();
                    let uri = extract_uri(params)?;
                    let result = server.discover_file(&uri)?;
                    send_stdout(&json!({
                        "jsonrpc": "2.0",
                        "id": id,
                        "result": result,
                    }))?;
                }
                _ => {
                    // https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#responseMessage
                    // Requests (messages carrying an id) must receive a
                    // response; unknown notifications are dropped.
                    let id = received_json["id"].as_i64();
                    if id.is_some() {
                        send_error(
                            id,
                            -32601, // Method not found
                            format!("method not found: {}", method),
                        )?;
                    }
                }
            }
        }
    }
}
fn main() {
let mut server = TestingLS::new();
let _guard = Log::init().expect("Failed to initialize logger");
if let Err(ls_error) = server.main_loop() {
if let Err(ls_error) = main_loop(&mut server) {
tracing::error!("Error: {:?}", ls_error);
}
}

View file

@ -1,56 +1,31 @@
use crate::error::LSError;
use crate::spec::AdapterConfiguration;
use crate::spec::AdapterId;
use crate::spec::DetectWorkspaceResult;
use crate::spec::DiscoverResult;
use crate::spec::RunFileTestResult;
use crate::spec::RunFileTestResultItem;
use crate::spec::WorkspaceAnalysis;
use crate::spec::*;
use crate::util::resolve_path;
use crate::util::send_stdout;
use glob::glob;
use glob::Pattern;
use lsp_types::Diagnostic;
use lsp_types::DiagnosticOptions;
use lsp_types::DiagnosticServerCapabilities;
use lsp_types::DiagnosticSeverity;
use lsp_types::InitializeParams;
use lsp_types::InitializeResult;
use lsp_types::NumberOrString;
use lsp_types::Position;
use lsp_types::ProgressParams;
use lsp_types::ProgressParamsValue;
use lsp_types::PublishDiagnosticsParams;
use lsp_types::Range;
use lsp_types::ServerCapabilities;
use lsp_types::TextDocumentSyncCapability;
use lsp_types::TextDocumentSyncKind;
use lsp_types::Url;
use lsp_types::WorkDoneProgress;
use lsp_types::WorkDoneProgressBegin;
use lsp_types::WorkDoneProgressCreateParams;
use lsp_types::WorkDoneProgressEnd;
use lsp_types::WorkDoneProgressOptions;
use serde::de::Error;
use lsp_types::*;
use serde::Deserialize;
use serde_json::json;
use serde_json::Value;
use std::collections::HashMap;
use std::io::BufRead;
use std::io::{self, Read};
use std::env::current_dir;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::process::Output;
use testing_language_server::spec::DiscoverResult;
const TOML_FILE_NAME: &str = ".testingls.toml";
#[derive(Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct InitializedOptions {
adapter_command: HashMap<AdapterId, Vec<AdapterConfiguration>>,
project_dir: Option<PathBuf>,
adapter_command: HashMap<AdapterId, AdapterConfiguration>,
enable_workspace_diagnostics: Option<bool>,
}
pub struct TestingLS {
pub initialize_params: InitializeParams,
pub workspace_folders: Option<Vec<WorkspaceFolder>>,
pub options: InitializedOptions,
pub workspaces_cache: Vec<WorkspaceAnalysis>,
}
@ -61,167 +36,46 @@ impl Default for TestingLS {
}
}
/// The status of workspace diagnostics
/// - Skipped: Skip workspace diagnostics (when `enable_workspace_diagnostics` is false)
/// - Done: Finish workspace diagnostics (when `enable_workspace_diagnostics` is true)
#[derive(Debug, PartialEq, Eq)]
pub enum WorkspaceDiagnosticsStatus {
Skipped,
Done,
}
impl TestingLS {
pub fn new() -> Self {
Self {
initialize_params: Default::default(),
workspace_folders: None,
options: Default::default(),
workspaces_cache: Vec::new(),
}
}
pub fn main_loop(&mut self) -> Result<(), LSError> {
loop {
let mut size = 0;
'read_header: loop {
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock(); // We get `StdinLock` here.
handle.read_line(&mut buffer)?;
if buffer.is_empty() {
tracing::warn!("buffer is empty")
}
// The end of header section
if buffer == "\r\n" {
break 'read_header;
}
let splitted: Vec<&str> = buffer.split(' ').collect();
if splitted.len() != 2 {
tracing::warn!("unexpected");
}
let header_name = splitted[0].to_lowercase();
let header_value = splitted[1].trim();
match header_name.as_ref() {
"content-length" => {}
"content-type:" => {}
_ => {}
}
size = header_value.parse::<usize>().unwrap();
}
let stdin = io::stdin();
let mut handle = stdin.lock();
let mut buf = vec![0u8; size];
handle.read_exact(&mut buf).unwrap();
let message = String::from_utf8(buf).unwrap();
let value: Value = serde_json::from_str(&message)?;
let method = &value["method"].as_str();
let params = &value["params"];
if let Some(method) = method {
match *method {
"initialize" => {
self.initialize_params = InitializeParams::deserialize(params)?;
self.options = (self.handle_initialization_options(
self.initialize_params.initialization_options.as_ref(),
))?;
let id = value["id"].as_i64().unwrap();
self.initialize(id)?;
}
"workspace/diagnostic" => {
self.check_workspace()?;
}
"textDocument/diagnostic" | "textDocument/didSave" => {
let uri = params["textDocument"]["uri"]
.as_str()
.ok_or(serde_json::Error::custom("`textDocument.uri` is not set"))?;
self.check_file(uri, false)?;
}
"$/runFileTest" => {
let uri = params["uri"]
.as_str()
.ok_or(serde_json::Error::custom("`uri` is not set"))?;
self.check_file(uri, false)?;
}
"$/discoverFileTest" => {
let id = value["id"].as_i64().unwrap();
let uri = params["uri"]
.as_str()
.ok_or(serde_json::Error::custom("`uri` is not set"))?;
let result = self.discover_file(uri)?;
send_stdout(&json!({
"jsonrpc": "2.0",
"id": id,
"result": result,
}))?;
}
_ => {}
}
}
}
}
fn adapter_commands(&self) -> HashMap<AdapterId, Vec<AdapterConfiguration>> {
self.options.adapter_command.clone()
}
fn project_files(
base_dir: &Path,
include_pattern: &[String],
exclude_pattern: &[String],
) -> Vec<String> {
let mut result: Vec<String> = vec![];
let base_dir = base_dir.to_string_lossy().to_string();
let exclude_pattern = exclude_pattern
.iter()
.filter_map(|exclude_pattern| {
Pattern::new(&format!("!{base_dir}{exclude_pattern}")).ok()
})
.collect::<Vec<Pattern>>();
for include_pattern in include_pattern {
let matched = glob(format!("{base_dir}{include_pattern}").as_str());
if let Ok(entries) = matched {
for path in entries.flatten() {
let should_exclude = exclude_pattern
.iter()
.any(|exclude_pattern| exclude_pattern.matches(path.to_str().unwrap()));
if !should_exclude {
result.push(path.display().to_string());
}
}
}
}
result
}
fn build_capabilities(&self) -> ServerCapabilities {
ServerCapabilities {
diagnostic_provider: Some(DiagnosticServerCapabilities::Options(DiagnosticOptions {
identifier: None,
inter_file_dependencies: false,
workspace_diagnostics: true,
work_done_progress_options: WorkDoneProgressOptions::default(),
})),
text_document_sync: Some(TextDocumentSyncCapability::Kind(
TextDocumentSyncKind::INCREMENTAL,
)),
..ServerCapabilities::default()
}
}
pub fn handle_initialization_options(
&self,
options: Option<&Value>,
) -> Result<InitializedOptions, LSError> {
if let Some(options) = options {
Ok(serde_json::from_value(options.clone())?)
fn project_dir(&self) -> Result<PathBuf, LSError> {
let cwd = current_dir();
if let Ok(cwd) = cwd {
Ok(cwd)
} else {
Err(LSError::Any(anyhow::anyhow!(
"Invalid initialization options"
)))
let default_project_dir = self
.workspace_folders
.as_ref()
.ok_or(LSError::Any(anyhow::anyhow!("No workspace folders found")))?;
let default_workspace_uri = &default_project_dir[0].uri;
Ok(default_workspace_uri.to_file_path().unwrap())
}
}
pub fn initialize(&self, id: i64) -> Result<(), LSError> {
pub fn initialize(
&mut self,
id: i64,
initialize_params: InitializeParams,
) -> Result<(), LSError> {
self.workspace_folders = initialize_params.workspace_folders;
self.options = (self
.handle_initialization_options(initialize_params.initialization_options.as_ref()))?;
let result = InitializeResult {
capabilities: self.build_capabilities(),
..InitializeResult::default()
@ -236,56 +90,134 @@ impl TestingLS {
Ok(())
}
pub fn refresh_workspaces_cache(&mut self) -> Result<(), LSError> {
let adapter_commands = self.adapter_commands();
let default_project_dir = self
.initialize_params
.clone()
.workspace_folders
.ok_or(LSError::Any(anyhow::anyhow!("No workspace folders found")))?;
let default_workspace_uri = default_project_dir[0].uri.clone();
let project_dir = self
.options
.project_dir
.clone()
.unwrap_or(default_workspace_uri.to_file_path().unwrap());
self.workspaces_cache = vec![];
// Nested and multiple loops, but each count is small
for adapter_commands in adapter_commands.values() {
for adapter in adapter_commands {
let &AdapterConfiguration {
path,
extra_args,
envs,
include_patterns,
exclude_patterns,
} = &adapter;
let file_paths =
Self::project_files(&project_dir, include_patterns, exclude_patterns);
if file_paths.is_empty() {
continue;
}
let mut adapter_command = Command::new(path);
let mut args_file_path: Vec<&str> = vec![];
file_paths.iter().for_each(|file_path| {
args_file_path.push("--file-paths");
args_file_path.push(file_path);
});
let output = adapter_command
.arg("detect-workspace")
.args(args_file_path)
.arg("--")
.args(extra_args)
.envs(envs)
.output()
.map_err(|err| LSError::Adapter(err.to_string()))?;
let adapter_result = String::from_utf8(output.stdout)
.map_err(|err| LSError::Adapter(err.to_string()))?;
let workspace: DetectWorkspaceResult = serde_json::from_str(&adapter_result)?;
self.workspaces_cache
.push(WorkspaceAnalysis::new(adapter.clone(), workspace))
/// Returns the adapter configurations from the initialization options,
/// keyed by adapter id (clones the `adapter_command` map).
fn adapter_commands(&self) -> HashMap<AdapterId, AdapterConfiguration> {
    self.options.adapter_command.clone()
}
/// Lists the files under `base_dir` that match any `include` glob pattern
/// and none of the `exclude` patterns. Symlinks are followed; exclude
/// patterns are resolved relative to `base_dir`.
///
/// NOTE(review): paths are converted with `to_str().unwrap()`, so a
/// non-UTF-8 file name would panic — confirm this is acceptable for the
/// supported platforms.
fn project_files(base_dir: &Path, include: &[String], exclude: &[String]) -> Vec<String> {
    let mut result: Vec<String> = vec![];
    // Pre-compile the exclude globs once; patterns that fail to compile are
    // silently skipped (filter_map + ok).
    let exclude_pattern = exclude
        .iter()
        .filter_map(|exclude_pattern| {
            Pattern::new(base_dir.join(exclude_pattern).to_str().unwrap()).ok()
        })
        .collect::<Vec<Pattern>>();
    let base_dir = base_dir.to_str().unwrap();
    let entries = globwalk::GlobWalkerBuilder::from_patterns(base_dir, include)
        .follow_links(true)
        .build()
        .unwrap()
        .filter_map(Result::ok);
    for path in entries {
        // Keep a walked entry only if no exclude glob matches its full path.
        let should_exclude = exclude_pattern
            .iter()
            .any(|exclude_pattern| exclude_pattern.matches(path.path().to_str().unwrap()));
        if !should_exclude {
            result.push(path.path().to_str().unwrap().to_owned());
        }
    }
    result
}
/// Advertises this server's LSP capabilities.
///
/// Text document sync is `NONE`: diagnostics are produced from files on
/// disk rather than from incremental buffer edits (presumably triggered on
/// save — see the commit "fix: avoid incremental syncing").
fn build_capabilities(&self) -> ServerCapabilities {
    ServerCapabilities {
        diagnostic_provider: Some(DiagnosticServerCapabilities::Options(DiagnosticOptions {
            identifier: None,
            inter_file_dependencies: false,
            workspace_diagnostics: true,
            work_done_progress_options: WorkDoneProgressOptions::default(),
        })),
        text_document_sync: Some(TextDocumentSyncCapability::Kind(TextDocumentSyncKind::NONE)),
        ..ServerCapabilities::default()
    }
}
/// Resolves the server options, preferring a project-local TOML file over
/// the client-supplied `initializationOptions`.
///
/// Lookup order:
/// 1. `<project_dir>/<TOML_FILE_NAME>` — parsed as `InitializedOptions`.
/// 2. The JSON `options` value from the `initialize` request.
///
/// # Errors
/// Returns `LSError` when the TOML file exists but is malformed, when the
/// JSON options fail to deserialize, or when neither source is available.
pub fn handle_initialization_options(
    &self,
    options: Option<&Value>,
) -> Result<InitializedOptions, LSError> {
    let project_dir = self.project_dir()?;
    let toml_path = project_dir.join(TOML_FILE_NAME);
    let toml_content = std::fs::read_to_string(toml_path);
    match toml_content {
        // A malformed config file must surface as an error, not a panic:
        // the previous `.unwrap()` here crashed the whole server.
        Ok(toml_content) => toml::from_str::<InitializedOptions>(&toml_content)
            .map_err(|err| LSError::Any(anyhow::anyhow!("Invalid {}: {}", TOML_FILE_NAME, err))),
        Err(_) => {
            if let Some(options) = options {
                Ok(serde_json::from_value(options.clone())?)
            } else {
                Err(LSError::Any(anyhow::anyhow!(
                    "Invalid initialization options"
                )))
            }
        }
    }
}
pub fn refresh_workspaces_cache(&mut self) -> Result<(), LSError> {
let adapter_commands = self.adapter_commands();
let project_dir = self.project_dir()?;
self.workspaces_cache = vec![];
// Nested and multiple loops, but each count is small
for adapter in adapter_commands.into_values() {
let AdapterConfiguration {
path,
extra_arg,
env,
include,
exclude,
workspace_dir,
..
} = &adapter;
let file_paths = Self::project_files(&project_dir, include, exclude);
if file_paths.is_empty() {
continue;
}
let mut adapter_command = Command::new(path);
let mut args_file_path: Vec<&str> = vec![];
file_paths.iter().for_each(|file_path| {
args_file_path.push("--file-paths");
args_file_path.push(file_path);
});
let output = adapter_command
.arg("detect-workspace")
.args(args_file_path)
.arg("--")
.args(extra_arg)
.envs(env)
.output()
.map_err(|err| LSError::Adapter(err.to_string()))?;
let adapter_result = String::from_utf8(output.stdout)
.map_err(|err| LSError::Adapter(err.to_string()))?;
let workspace: DetectWorkspaceResult = match serde_json::from_str(&adapter_result) {
Ok(result) => result,
Err(err) => {
let stderr = String::from_utf8(output.stderr);
tracing::error!("Failed to parse adapter result: {:?}", err);
tracing::error!("Error: {:?}", stderr);
return Err(LSError::Adapter(err.to_string()));
}
};
let workspace = if let Some(workspace_dir) = workspace_dir {
let workspace_dir = resolve_path(&project_dir, workspace_dir)
.to_str()
.unwrap()
.to_string();
let target_paths = workspace
.data
.into_iter()
.flat_map(|kv| kv.1)
.collect::<Vec<_>>();
HashMap::from([(workspace_dir, target_paths)])
} else {
workspace.data
};
self.workspaces_cache.push(WorkspaceAnalysis::new(
adapter,
DetectWorkspaceResult { data: workspace },
))
}
tracing::info!("workspaces_cache={:#?}", self.workspaces_cache);
send_stdout(&json!({
"jsonrpc": "2.0",
"method": "$/detectedWorkspace",
@ -294,25 +226,59 @@ impl TestingLS {
Ok(())
}
pub fn check_workspace(&mut self) -> Result<(), LSError> {
/// Diagnoses the entire workspace for diagnostics.
/// This function will refresh the workspace cache, check if workspace diagnostics are enabled,
/// and then iterate through all workspaces to diagnose them.
/// It will trigger the publication of diagnostics for all files in the workspace
/// through the Language Server Protocol.
pub fn diagnose_workspace(&mut self) -> Result<WorkspaceDiagnosticsStatus, LSError> {
self.refresh_workspaces_cache()?;
if !self.options.enable_workspace_diagnostics.unwrap_or(true) {
return Ok(WorkspaceDiagnosticsStatus::Skipped);
}
self.workspaces_cache.iter().for_each(
|WorkspaceAnalysis {
adapter_config: adapter,
workspaces,
}| {
workspaces.iter().for_each(|(workspace, paths)| {
let _ = self.check(adapter, workspace, paths);
workspaces.data.iter().for_each(|(workspace, paths)| {
let _ = self.diagnose(adapter, workspace, paths);
})
},
);
Ok(())
Ok(WorkspaceDiagnosticsStatus::Done)
}
/// Returns `true` when `path` belongs to the project (matches an adapter's
/// include/exclude globs) but is not yet tracked by any cached workspace —
/// i.e. the workspace cache is stale and should be rebuilt.
pub fn refreshing_needed(&self, path: &str) -> bool {
    let base_dir = self.project_dir();
    match base_dir {
        Ok(base_dir) => self.workspaces_cache.iter().any(|cache| {
            let include = &cache.adapter_config.include;
            let exclude = &cache.adapter_config.exclude;
            // Already tracked by one of this adapter's workspaces:
            // no refresh needed on its account.
            if cache
                .workspaces
                .data
                .iter()
                .any(|(_, workspace)| workspace.contains(&path.to_string()))
            {
                return false;
            }
            // Untracked but matching the adapter's globs => cache is stale.
            Self::project_files(&base_dir, include, exclude).contains(&path.to_owned())
        }),
        // Without a project dir we cannot decide; log and skip refreshing.
        Err(e) => {
            tracing::error!("Error: {:?}", e);
            false
        }
    }
}
/// Checks a specific file for diagnostics, optionally refreshing the workspace cache.
/// This function will trigger the publication of diagnostics for the specified file
/// through the Language Server Protocol.
pub fn check_file(&mut self, path: &str, refresh_needed: bool) -> Result<(), LSError> {
let path = path.replace("file://", "");
if refresh_needed {
if refresh_needed || self.workspaces_cache.is_empty() {
self.refresh_workspaces_cache()?;
}
self.workspaces_cache.iter().for_each(
@ -320,11 +286,11 @@ impl TestingLS {
adapter_config: adapter,
workspaces,
}| {
for (workspace, paths) in workspaces.iter() {
for (workspace, paths) in workspaces.data.iter() {
if !paths.contains(&path.to_string()) {
continue;
}
let _ = self.check(adapter, workspace, paths);
let _ = self.diagnose(adapter, workspace, &[path.to_string()]);
}
},
);
@ -351,57 +317,50 @@ impl TestingLS {
.arg("run-file-test")
.args(args)
.arg("--")
.args(&adapter.extra_args)
.envs(&adapter.envs)
.args(&adapter.extra_arg)
.envs(&adapter.env)
.output()
.map_err(|err| LSError::Adapter(err.to_string()))?;
let Output { stdout, stderr, .. } = output;
if !stderr.is_empty() {
let message = "Cannot run test command: \n".to_string()
+ &String::from_utf8(stderr.clone()).unwrap();
let placeholder_diagnostic = Diagnostic {
range: Range {
start: Position {
line: 0,
character: 0,
},
end: Position {
line: 0,
character: 0,
},
},
let message = "Error occurred when running test via adapter.\nCheck adapter log or run tests manually".to_string();
let message_type = MessageType::ERROR;
let params: ShowMessageParams = ShowMessageParams {
typ: message_type,
message,
severity: Some(DiagnosticSeverity::WARNING),
code_description: None,
code: None,
source: None,
tags: None,
related_information: None,
data: None,
};
for path in paths {
diagnostics.push((path.to_string(), vec![placeholder_diagnostic.clone()]));
}
send_stdout(&json!({
"jsonrpc": "2.0",
"method": "window/showMessage",
"params": params,
}))
.unwrap();
}
let adapter_result =
String::from_utf8(stdout).map_err(|err| LSError::Adapter(err.to_string()))?;
if let Ok(res) = serde_json::from_str::<RunFileTestResult>(&adapter_result) {
for target_file in paths {
let diagnostics_for_file: Vec<Diagnostic> = res
.clone()
.iter()
.filter(|RunFileTestResultItem { path, .. }| path == target_file)
.flat_map(|RunFileTestResultItem { diagnostics, .. }| diagnostics.clone())
.collect();
let uri = Url::from_file_path(target_file.replace("file://", "")).unwrap();
diagnostics.push((uri.to_string(), diagnostics_for_file));
match serde_json::from_str::<RunFileTestResult>(&adapter_result) {
Ok(res) => {
for target_file in paths {
let diagnostics_for_file: Vec<Diagnostic> = res
.data
.clone()
.into_iter()
.filter(|FileDiagnostics { path, .. }| path == target_file)
.flat_map(|FileDiagnostics { diagnostics, .. }| diagnostics)
.collect();
let uri = Url::from_file_path(target_file.replace("file://", "")).unwrap();
diagnostics.push((uri.to_string(), diagnostics_for_file));
}
}
Err(err) => {
tracing::error!("Failed to parse adapter result: {:?}", err);
}
}
Ok(diagnostics)
}
fn check(
fn diagnose(
&self,
adapter: &AdapterConfiguration,
workspace: &str,
@ -419,7 +378,7 @@ impl TestingLS {
}))
.unwrap();
let progress_begin = WorkDoneProgressBegin {
title: format!("Testing by adapter: {}", adapter.path),
title: "Testing".to_string(),
cancellable: Some(false),
message: Some(format!("testing {} files ...", paths.len())),
percentage: Some(0),
@ -458,20 +417,21 @@ impl TestingLS {
}
#[allow(clippy::for_kv_map)]
fn discover_file(&self, path: &str) -> Result<DiscoverResult, LSError> {
let path = path.replace("file://", "");
pub fn discover_file(&self, path: &str) -> Result<DiscoverResult, LSError> {
let target_paths = vec![path.to_string()];
let mut result: DiscoverResult = vec![];
let mut result: DiscoverResult = DiscoverResult { data: vec![] };
for WorkspaceAnalysis {
adapter_config: adapter,
workspaces,
} in &self.workspaces_cache
{
for (_, paths) in workspaces.iter() {
for (_, paths) in workspaces.data.iter() {
if !paths.contains(&path.to_string()) {
continue;
}
result.extend(self.discover(adapter, &target_paths)?);
result
.data
.extend(self.discover(adapter, &target_paths)?.data);
}
}
Ok(result)
@ -492,8 +452,8 @@ impl TestingLS {
.arg("discover")
.args(args)
.arg("--")
.args(&adapter.extra_args)
.envs(&adapter.envs)
.args(&adapter.extra_arg)
.envs(&adapter.env)
.output()
.map_err(|err| LSError::Adapter(err.to_string()))?;
@ -511,11 +471,19 @@ impl TestingLS {
}))?;
Ok(())
}
/// Responds to the LSP `shutdown` request: echoes the request `id` back
/// with a `null` result, as the protocol requires.
pub fn shutdown(&self, id: i64) -> Result<(), LSError> {
    send_stdout(&json!({
        "jsonrpc": "2.0",
        "id": id,
        "result": null
    }))?;
    Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::util::extension_from_url_str;
use lsp_types::{Url, WorkspaceFolder};
use std::collections::HashMap;
@ -523,28 +491,25 @@ mod tests {
#[test]
fn test_check_file() {
let abs_path_of_test_proj = std::env::current_dir().unwrap().join("test_proj/rust");
let abs_path_of_demo = std::env::current_dir().unwrap().join("demo/rust");
let mut server = TestingLS {
initialize_params: InitializeParams {
workspace_folders: Some(vec![WorkspaceFolder {
uri: Url::from_file_path(&abs_path_of_test_proj).unwrap(),
name: "test_proj".to_string(),
}]),
..InitializeParams::default()
},
workspace_folders: Some(vec![WorkspaceFolder {
uri: Url::from_file_path(&abs_path_of_demo).unwrap(),
name: "demo".to_string(),
}]),
options: InitializedOptions {
adapter_command: HashMap::from([(String::from(".rs"), vec![])]),
project_dir: None,
adapter_command: HashMap::new(),
enable_workspace_diagnostics: Some(true),
},
workspaces_cache: Vec::new(),
};
let librs = abs_path_of_test_proj.join("lib.rs");
let librs = abs_path_of_demo.join("lib.rs");
server.check_file(librs.to_str().unwrap(), true).unwrap();
}
#[test]
fn test_check_workspace() {
let abs_path_of_test_proj = std::env::current_dir().unwrap().join("test_proj/rust");
let abs_path_of_demo = std::env::current_dir().unwrap().join("demo/rust");
let abs_path_of_rust_adapter = std::env::current_dir()
.unwrap()
.join("target/debug/testing-ls-adapter");
@ -554,26 +519,21 @@ mod tests {
.unwrap();
let adapter_conf = AdapterConfiguration {
path: abs_path_of_rust_adapter,
extra_args: vec!["--test-kind=cargo-test".to_string()],
envs: HashMap::new(),
include_patterns: vec![],
exclude_patterns: vec![],
extra_arg: vec!["--test-kind=cargo-test".to_string()],
..Default::default()
};
let mut server = TestingLS {
initialize_params: InitializeParams {
workspace_folders: Some(vec![WorkspaceFolder {
uri: Url::from_file_path(abs_path_of_test_proj.clone()).unwrap(),
name: "test_proj".to_string(),
}]),
..InitializeParams::default()
},
workspace_folders: Some(vec![WorkspaceFolder {
uri: Url::from_file_path(&abs_path_of_demo).unwrap(),
name: "demo".to_string(),
}]),
options: InitializedOptions {
adapter_command: HashMap::from([(String::from(".rs"), vec![adapter_conf])]),
project_dir: None,
adapter_command: HashMap::from([(String::from(".rs"), adapter_conf)]),
enable_workspace_diagnostics: Some(true),
},
workspaces_cache: Vec::new(),
};
server.check_workspace().unwrap();
server.diagnose_workspace().unwrap();
server
.workspaces_cache
.iter()
@ -582,9 +542,10 @@ mod tests {
assert!(adapter_command_path.contains("target/debug/testing-ls-adapter"));
workspace_analysis
.workspaces
.data
.iter()
.for_each(|(workspace, paths)| {
assert_eq!(workspace, abs_path_of_test_proj.to_str().unwrap());
assert_eq!(workspace, abs_path_of_demo.to_str().unwrap());
paths.iter().for_each(|path| {
assert!(path.contains("rust/src"));
});
@ -594,69 +555,37 @@ mod tests {
#[test]
fn project_files_are_filtered_by_extension() {
let absolute_path_of_test_proj = std::env::current_dir().unwrap().join("test_proj");
let absolute_path_of_demo = std::env::current_dir().unwrap().join("demo");
let files = TestingLS::project_files(
&absolute_path_of_test_proj.clone(),
&absolute_path_of_demo.clone(),
&["/rust/src/lib.rs".to_string()],
&["/rust/src/target/**/*".to_string()],
&["/rust/target/**/*".to_string()],
);
let librs = absolute_path_of_test_proj.join("rust/src/lib.rs");
let librs = absolute_path_of_demo.join("rust/src/lib.rs");
assert_eq!(files, vec![librs.to_str().unwrap()]);
let files = TestingLS::project_files(
&absolute_path_of_test_proj.clone(),
&["**/*.js".to_string()],
&["**/node_modules/**/*".to_string()],
&absolute_path_of_demo.clone(),
&["jest/*.spec.js".to_string()],
&["jest/another.spec.js".to_string()],
);
files.iter().for_each(|file| {
assert_eq!(extension_from_url_str(file).unwrap(), ".js");
});
let test_file = absolute_path_of_demo.join("jest/index.spec.js");
assert_eq!(files, vec![test_file.to_str().unwrap()]);
}
#[test]
fn bubble_adapter_error() {
let adapter_conf: AdapterConfiguration = AdapterConfiguration {
path: std::env::current_dir()
.unwrap()
.join("target/debug/testing-ls-adapter")
.to_str()
.unwrap()
.to_string(),
extra_args: vec!["--invalid-arg".to_string()],
envs: HashMap::new(),
include_patterns: vec![],
exclude_patterns: vec![],
};
let abs_path_of_test_proj = std::env::current_dir().unwrap().join("test_proj/rust");
let files = TestingLS::project_files(
&abs_path_of_test_proj.clone(),
&["/**/*.rs".to_string()],
&[],
);
let server = TestingLS {
initialize_params: InitializeParams {
workspace_folders: Some(vec![WorkspaceFolder {
uri: Url::from_file_path(&abs_path_of_test_proj).unwrap(),
name: "test_proj".to_string(),
}]),
..InitializeParams::default()
},
fn skip_workspace_diagnostics() {
let mut server = TestingLS {
workspace_folders: Some(vec![WorkspaceFolder {
uri: Url::from_file_path(current_dir().unwrap()).unwrap(),
name: "demo".to_string(),
}]),
options: InitializedOptions {
adapter_command: HashMap::from([(String::from(".rs"), vec![adapter_conf.clone()])]),
project_dir: None,
adapter_command: HashMap::new(),
enable_workspace_diagnostics: Some(false),
},
workspaces_cache: Vec::new(),
};
let diagnostics = server
.get_diagnostics(
&adapter_conf,
abs_path_of_test_proj.to_str().unwrap(),
&files,
)
.unwrap();
assert_eq!(diagnostics.len(), 1);
let diagnostic = diagnostics.first().unwrap().1.first().unwrap();
assert_eq!(diagnostic.severity.unwrap(), DiagnosticSeverity::WARNING);
assert!(diagnostic.message.contains("Cannot run test command:"));
let status = server.diagnose_workspace().unwrap();
assert_eq!(status, WorkspaceDiagnosticsStatus::Skipped);
}
}

View file

@ -1,6 +1,7 @@
use clap::Parser;
use lsp_types::Diagnostic;
use lsp_types::Range;
use lsp_types::ShowMessageParams;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
@ -12,6 +13,7 @@ pub enum AdapterCommands {
DetectWorkspace(DetectWorkspaceArgs),
}
/// Arguments for `<adapter command> discover` command
#[derive(clap::Args, Debug)]
#[command(version, about, long_about = None)]
pub struct DiscoverArgs {
@ -21,6 +23,7 @@ pub struct DiscoverArgs {
pub extra: Vec<String>,
}
/// Arguments for `<adapter command> run-file-test` command
#[derive(clap::Args, Debug)]
#[command(version, about, long_about = None)]
pub struct RunFileTestArgs {
@ -34,6 +37,7 @@ pub struct RunFileTestArgs {
pub extra: Vec<String>,
}
/// Arguments for `<adapter command> detect-workspace` command
#[derive(clap::Args, Debug)]
#[command(version, about, long_about = None)]
pub struct DetectWorkspaceArgs {
@ -43,9 +47,9 @@ pub struct DetectWorkspaceArgs {
pub extra: Vec<String>,
}
pub(crate) type AdapterId = String;
pub(crate) type FilePath = String;
pub(crate) type WorkspaceFilePath = String;
pub type AdapterId = String;
pub type FilePath = String;
pub type WorkspaceFilePath = String;
#[derive(Debug, Serialize, Clone)]
pub struct WorkspaceAnalysis {
@ -62,39 +66,59 @@ impl WorkspaceAnalysis {
}
}
#[derive(Debug, Deserialize, Clone, Serialize)]
#[derive(Debug, Deserialize, Clone, Serialize, Default)]
pub struct AdapterConfiguration {
pub path: String,
#[serde(default)]
pub extra_args: Vec<String>,
pub extra_arg: Vec<String>,
#[serde(default)]
pub envs: HashMap<String, String>,
pub include_patterns: Vec<String>,
pub exclude_patterns: Vec<String>,
pub env: HashMap<String, String>,
pub include: Vec<String>,
pub exclude: Vec<String>,
pub workspace_dir: Option<String>,
}
pub type DetectWorkspaceResult = HashMap<WorkspaceFilePath, Vec<FilePath>>;
/// Result of `<adapter command> detect-workspace`
#[derive(Debug, Serialize, Clone, Deserialize)]
pub struct DetectWorkspaceResult {
    /// Workspace root path -> files belonging to that workspace.
    pub data: HashMap<WorkspaceFilePath, Vec<FilePath>>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)]
pub struct RunFileTestResultItem {
pub struct FileDiagnostics {
pub path: String,
pub diagnostics: Vec<Diagnostic>,
}
pub type RunFileTestResult = Vec<RunFileTestResultItem>;
/// Result of `<adapter command> run-file-test`
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)]
pub struct RunFileTestResult {
    /// Diagnostics produced by the test run, grouped per file.
    pub data: Vec<FileDiagnostics>,
    /// Optional `window/showMessage` payloads for the client; defaults to
    /// empty when the adapter omits the field.
    #[serde(default)]
    pub messages: Vec<ShowMessageParams>,
}
/// A single test discovered by an adapter.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct TestItem {
    /// Adapter-defined identifier (e.g. `relative::path::tests::id` in Rust).
    pub id: String,
    /// Display name of the test.
    pub name: String,
    /// Although FoundFileTests also has a `path` field, we keep the `path` field in TestItem
    /// because sometimes we need to determine where a TestItem is located on its own
    /// Example: In Rust tests, determining which file contains a test from IDs like relative::path::tests::id
    /// TODO: Remove FoundFileTests.path once we confirm it's no longer needed
    pub path: String,
    /// Position where the test begins in the file.
    pub start_position: Range,
    /// Position where the test ends in the file.
    pub end_position: Range,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct DiscoverResultItem {
pub struct FoundFileTests {
pub path: String,
pub tests: Vec<TestItem>,
}
pub type DiscoverResult = Vec<DiscoverResultItem>;
/// Result of `<adapter command> discover`
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct DiscoverResult {
    /// Tests found, grouped per file.
    pub data: Vec<FoundFileTests>,
}

View file

@ -1,20 +1,191 @@
use crate::error::LSError;
use chrono::NaiveDate;
use chrono::Utc;
use serde::Deserialize;
use serde::Serialize;
use serde_json::json;
use serde_json::Number;
use serde_json::Value;
use std::fs;
use std::io::stdout;
use std::io::Write;
/// Returns the extension which includes `.` from the url string.
///
/// Mirrors the original contract: the text after the last `.` is treated as
/// the extension; a string with no `.` yields `"."` + the whole string.
pub fn extension_from_url_str(url_str: &str) -> Option<String> {
    // rsplit('.') always yields at least one segment, so `next()` is Some.
    let last_segment = url_str.rsplit('.').next()?;
    Some(format!(".{}", last_segment))
}
use std::path::Path;
use std::path::PathBuf;
pub fn send_stdout<T>(message: &T) -> Result<(), LSError>
where
T: ?Sized + Serialize,
T: ?Sized + Serialize + std::fmt::Debug,
{
tracing::info!("send stdout: {:#?}", message);
let msg = serde_json::to_string(message)?;
let mut stdout = stdout().lock();
write!(stdout, "Content-Length: {}\r\n\r\n{}", msg.len(), msg)?;
stdout.flush()?;
Ok(())
}
/// A JSON-RPC 2.0 error response envelope.
#[derive(Debug, Serialize, Deserialize)]
pub struct ErrorMessage {
    // Always "2.0"; pinned by `new`.
    jsonrpc: String,
    // Id of the request this error answers; `None` for id-less errors.
    id: Option<Number>,
    pub error: Value,
}

impl ErrorMessage {
    /// Builds an error envelope with `jsonrpc` fixed to "2.0".
    #[allow(dead_code)]
    pub fn new<N: Into<Number>>(id: Option<N>, error: Value) -> Self {
        Self {
            jsonrpc: "2.0".into(),
            id: id.map(|i| i.into()),
            error,
        }
    }
}
/// Writes a JSON-RPC error response (`{ code, message }`) to stdout using
/// the LSP Content-Length framing provided by `send_stdout`.
pub fn send_error<S: Into<String>>(id: Option<i64>, code: i64, msg: S) -> Result<(), LSError> {
    send_stdout(&ErrorMessage::new(
        id,
        json!({ "code": code, "message": msg.into() }),
    ))
}
/// Strips the leading `file://` scheme from a URI, returning a plain path.
///
/// Uses `strip_prefix` instead of `replace`: the previous implementation
/// removed every occurrence of `file://` anywhere in the string, which
/// would corrupt a path that happens to contain that substring.
pub fn format_uri(uri: &str) -> String {
    uri.strip_prefix("file://").unwrap_or(uri).to_string()
}
/// Resolves `relative_path` against `base_dir` and lexically normalizes the
/// result: `.` segments are dropped and `..` segments pop the previous
/// component. An already-absolute `relative_path` is used as-is.
///
/// Normalization is purely textual — the filesystem is never consulted.
pub fn resolve_path(base_dir: &Path, relative_path: &str) -> PathBuf {
    let raw = Path::new(relative_path);
    let joined = if raw.is_absolute() {
        raw.to_path_buf()
    } else {
        base_dir.join(raw)
    };
    let mut normalized: Vec<std::path::Component> = Vec::new();
    for part in joined.components() {
        match part {
            // `..` removes the most recent kept component (no-op at root).
            std::path::Component::ParentDir => {
                normalized.pop();
            }
            // `.` and Windows prefixes are dropped, as before.
            std::path::Component::CurDir | std::path::Component::Prefix(_) => {}
            other => normalized.push(other),
        }
    }
    normalized.into_iter().collect()
}
/// Deletes stale log files from `log_dir`.
///
/// Files are selected by `glob_pattern`; the date is taken from the part of
/// the file name after `prefix` and parsed as `%Y-%m-%d`. Files older than
/// `retention_days` (relative to today, UTC) are removed; names that do not
/// carry a parseable date are left untouched.
///
/// NOTE(review): the glob walker build result is `unwrap()`ed, so an
/// invalid `glob_pattern` would panic — callers currently pass a fixed
/// pattern, confirm before exposing this to user input.
pub fn clean_old_logs(
    log_dir: &str,
    retention_days: i64,
    glob_pattern: &str,
    prefix: &str,
) -> Result<(), LSError> {
    let today = Utc::now().date_naive();
    let retention_threshold = today - chrono::Duration::days(retention_days);
    let walker = globwalk::GlobWalkerBuilder::from_patterns(log_dir, &[glob_pattern])
        .build()
        .unwrap();
    for entry in walker.filter_map(Result::ok) {
        let path = entry.path();
        if let Some(file_name) = path.file_name().and_then(|f| f.to_str()) {
            if let Some(date_str) = file_name.strip_prefix(prefix) {
                if let Ok(file_date) = NaiveDate::parse_from_str(date_str, "%Y-%m-%d") {
                    if file_date < retention_threshold {
                        fs::remove_file(path)?;
                    }
                }
            }
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;

    /// Exercises lexical normalization of `resolve_path`.
    #[test]
    fn test_resolve_path() {
        let base_dir = PathBuf::from("/Users/test/projects");
        // relative path
        assert_eq!(
            resolve_path(&base_dir, "github.com/hoge/fuga"),
            PathBuf::from("/Users/test/projects/github.com/hoge/fuga")
        );
        // current directory
        assert_eq!(
            resolve_path(&base_dir, "./github.com/hoge/fuga"),
            PathBuf::from("/Users/test/projects/github.com/hoge/fuga")
        );
        // parent directory
        assert_eq!(
            resolve_path(&base_dir, "../other/project"),
            PathBuf::from("/Users/test/other/project")
        );
        // multiple ..
        assert_eq!(
            resolve_path(&base_dir, "foo/bar/../../../baz"),
            PathBuf::from("/Users/test/baz")
        );
        // absolute path
        assert_eq!(
            resolve_path(&base_dir, "/absolute/path"),
            PathBuf::from("/absolute/path")
        );
        // empty relative path
        assert_eq!(
            resolve_path(&base_dir, ""),
            PathBuf::from("/Users/test/projects")
        );
        // ending /
        assert_eq!(
            resolve_path(&base_dir, "github.com/hoge/fuga/"),
            PathBuf::from("/Users/test/projects/github.com/hoge/fuga")
        );
        // complex path
        assert_eq!(
            resolve_path(&base_dir, "./foo/../bar/./baz/../qux/"),
            PathBuf::from("/Users/test/projects/bar/qux")
        );
    }

    /// Verifies that only date-suffixed files older than the retention
    /// window are removed.
    #[test]
    fn test_clean_old_logs() {
        // Use a process-unique directory under the system temp dir: the
        // previous version operated on the user's real
        // ~/.config/testing_language_server/logs, which could delete the
        // user's actual log files and collide with concurrent test runs.
        // This also removes the test's dependency on the `dirs` crate.
        let log_dir = std::env::temp_dir().join(format!(
            "testing_language_server_test_logs_{}",
            std::process::id()
        ));
        std::fs::create_dir_all(&log_dir).unwrap();

        // Create test log files
        let old_file = log_dir.join("prefix.log.2023-01-01");
        File::create(&old_file).unwrap();
        let recent_file = log_dir.join("prefix.log.2099-12-31");
        File::create(&recent_file).unwrap();
        let non_log_file = log_dir.join("not_a_log.txt");
        File::create(&non_log_file).unwrap();

        // Run the clean_old_logs function
        clean_old_logs(log_dir.to_str().unwrap(), 30, "prefix.log.*", "prefix.log.").unwrap();

        // Check results
        assert!(!old_file.exists(), "Old log file should be deleted");
        assert!(
            recent_file.exists(),
            "Recent log file should not be deleted"
        );
        assert!(non_log_file.exists(), "Non-log file should not be deleted");

        // Best-effort cleanup of the scratch directory.
        std::fs::remove_dir_all(&log_dir).ok();
    }
}

View file

@ -1,29 +0,0 @@
{
"languageserver": {
"testing": {
"command": "testing-language-server",
"trace.server": "verbose",
"filetypes": ["rust", "javascript"],
"initializationOptions": {
"adapterCommand": {
".rs": [
{
"path": "testing-ls-adapter",
"extra_args": ["--test-kind=cargo-test"],
"include_patterns": ["/**/*.rs"],
"exclude_patterns": ["/**/target/**"]
}
],
".js": [
{
"path": "testing-ls-adapter",
"extra_args": ["--test-kind=jest"],
"include_patterns": ["/**/*.js"],
"exclude_patterns": ["/node_modules/**/*"]
}
]
}
}
}
}
}

View file

@ -1,10 +0,0 @@
// Jest fixture: pairs one intentionally failing test with one passing test,
// presumably to exercise both diagnostic outcomes of the jest adapter.
// NOTE(review): line positions of these tests may be asserted elsewhere —
// confirm before reformatting.
describe("another", () => {
  it("fail", () => {
    expect(1).toBe(0)
  })
  it("pass", () => {
    expect(1).toBe(1)
  })
})

View file

@ -1,28 +0,0 @@
/// Demo helper; simply prints a greeting to stdout.
fn hello() {
    let greeting = "Hello, world!";
    println!("{}", greeting);
}
#[cfg(test)]
mod tests {
    // Not annotated with #[test]; presumably here to check that plain
    // functions are not picked up by test discovery — confirm with adapter.
    fn not_test() {}

    #[test]
    fn success() {
        assert!(true);
    }

    // Intentionally failing, to produce a failure diagnostic.
    #[test]
    fn fail() {
        assert!(false);
    }

    #[tokio::test]
    async fn tokio_test_success() {
        assert!(true);
    }

    // Intentionally failing async test.
    #[tokio::test]
    async fn tokio_test_fail() {
        assert!(false);
    }
}