diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..59c989e6 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,11 @@ +[target.x86_64-apple-darwin] +rustflags = [ + "-C", "link-arg=-undefined", + "-C", "link-arg=dynamic_lookup", +] + +[target.aarch64-apple-darwin] +rustflags = [ + "-C", "link-arg=-undefined", + "-C", "link-arg=dynamic_lookup", +] \ No newline at end of file diff --git a/.circleci/.pyre_configuration b/.circleci/.pyre_configuration deleted file mode 100644 index 7ffbe4d8..00000000 --- a/.circleci/.pyre_configuration +++ /dev/null @@ -1,12 +0,0 @@ -{ - "source_directories": [ - "." - ], - "search_path": [ - "stubs", "/tmp/libcst-env/lib/python3.7/site-packages" - ], - "exclude": [ - ".*/\\.tox/.*" - ], - "strict": true -} diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 1ae8cce7..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,114 +0,0 @@ -# Python CircleCI 2.0 configuration file -# -# Check https://circleci.com/docs/2.0/language-python/ for more details -# -version: 2.1 -workflows: - version: 2 - test: - jobs: - - lint - - docs - - pyre - - test-38 - - test-37 - - test-36 - - test-coverage - -commands: - tox: - description: "setup tox env and run tox command giving env parameter" - parameters: - env: - type: string - default: test - steps: - - checkout - - restore_cache: - key: tox-v1-{{ checksum "tox.ini" }}-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt" }}-{{ checksum "setup.py" }}-{{ checksum ".circleci/config.yml" }}-<< parameters.env >> - - run: - name: install tox - command: pip install --user tox - - run: - name: run tox - command: ~/.local/bin/tox -e << parameters.env >> - - save_cache: - key: tox-v1-{{ checksum "tox.ini" }}-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt" }}-{{ checksum "setup.py" }}-{{ checksum ".circleci/config.yml" }}-<< parameters.env >> - paths: - - '.tox' - -jobs: - lint: - docker: - - image: 
circleci/python:3.7 - steps: - - tox: - env: "lint" - - docs: - docker: - - image: circleci/python:3.7 - steps: - - run: - command: sudo apt-get install graphviz - - tox: - env: "docs" - - store_artifacts: - path: docs/build - destination: doc - - pyre: - docker: - - image: circleci/python:3.7 - steps: - - checkout - - restore_cache: - key: pyre-v1-{{ checksum "tox.ini" }}-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt" }}-{{ checksum "setup.py" }}-{{ checksum ".circleci/config.yml" }} - - run: - name: run pyre - command: | - test -d /tmp/libcst-env/ || python3 -m venv /tmp/libcst-env/ - source /tmp/libcst-env/bin/activate - pip install --upgrade pip - pip install -r requirements.txt -r requirements-dev.txt - pip uninstall -y libcst - pip install -e . - cp .circleci/.pyre_configuration . - pyre check - PYTHONPATH=`pwd` python libcst/tests/test_pyre_integration.py - git diff --exit-code # verify no generated changes - - save_cache: - key: pyre-v1-{{ checksum "tox.ini" }}-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt" }}-{{ checksum "setup.py" }}-{{ checksum ".circleci/config.yml" }} - paths: - - '/tmp/libcst-env/' - - test-38: - docker: - - image: circleci/python:3.8 - steps: - - tox: - env: "py38" - - test-37: - docker: - - image: circleci/python:3.7 - steps: - - tox: - env: "py37" - - test-coverage: - docker: - - image: circleci/python:3.7 - steps: - - tox: - env: "py37" - - tox: - env: "coverage" - - test-36: - docker: - - image: circleci/python:3.6 - steps: - - tox: - env: "py36" - diff --git a/.editorconfig b/.editorconfig index 0824f669..9f02a19e 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,10 +1,14 @@ root = true -[*.{py,pyi,toml,md}] -charset = "utf-8" +[*.{py,pyi,rs,toml,md}] +charset = utf-8 end_of_line = lf indent_size = 4 indent_style = space insert_final_newline = true trim_trailing_whitespace = true max_line_length = 88 + +[*.rs] +# https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md 
+max_line_length = 100 diff --git a/.flake8 b/.flake8 index e681a88c..eb0260b0 100644 --- a/.flake8 +++ b/.flake8 @@ -1,69 +1,126 @@ [flake8] ignore = - C407, # unnecessary list comprehension; A generator only better than a list - # comprehension if we don't always need to iterate through all items in - # the generator (based on the use case). - + # unnecessary list comprehension; A generator only better than a list + # comprehension if we don't always need to iterate through all items in + # the generator (based on the use case). + C407, # The following codes belong to pycodestyle, and overlap with black: - E101, # indentation contains mixed spaces and tabs - E111, # indentation is not a multiple of four - E112, # expected an indented block - E113, # unexpected indentation - E114, # indentation is not a multiple of four (comment) - E115, # expected an indented block (comment) - E116, # unexpected indentation (comment) - E121, # continuation line under-indented for hanging indent - E122, # continuation line missing indentation or outdented - E123, # closing bracket does not match indentation of opening bracket’s line - E124, # closing bracket does not match visual indentation - E125, # continuation line with same indent as next logical line - E126, # continuation line over-indented for hanging indent - E127, # continuation line over-indented for visual indent; is harmless - # (over-indent is visually unambiguous) and currently generates too - # many warnings for existing code. 
- E128, # continuation line under-indented for visual indent - E129, # visually indented line with same indent as next logical line - E131, # continuation line unaligned for hanging indent - E133, # closing bracket is missing indentation - E201, # whitespace after ‘(‘ - E202, # whitespace before ‘)’ - E203, # whitespace before ‘:’; this warning is invalid for slices - E211, # whitespace before ‘(‘ - E221, # multiple spaces before operator - E222, # multiple spaces after operator - E223, # tab before operator - E224, # tab after operator - E225, # missing whitespace around operator - E226, # missing whitespace around arithmetic operator - E227, # missing whitespace around bitwise or shift operator - E228, # missing whitespace around modulo operator - E231, # missing whitespace after ‘,’, ‘;’, or ‘:’ - E241, # multiple spaces after ‘,’ - E242, # tab after ‘,’ - E251, # unexpected spaces around keyword / parameter equals - E261, # at least two spaces before inline comment - E262, # inline comment should start with ‘# ‘ - E265, # block comment should start with ‘# ‘ - E266, # too many leading ‘#’ for block comment - E271, # multiple spaces after keyword - E272, # multiple spaces before keyword - E273, # tab after keyword - E274, # tab before keyword - E275, # missing whitespace after keyword - E301, # expected 1 blank line, found 0 - E302, # expected 2 blank lines, found 0 - E303, # too many blank lines (3) - E304, # blank lines found after function decorator - E305, # expected 2 blank lines after end of function or class - E306, # expected 1 blank line before a nested definition - E401, # multiple imports on one line - E501, # line too long (> 79 characters) - E502, # the backslash is redundant between brackets - E701, # multiple statements on one line (colon) - E702, # multiple statements on one line (semicolon) - E703, # statement ends with a semicolon - E704, # multiple statements on one line (def) + # indentation contains mixed spaces and tabs + E101, + # 
indentation is not a multiple of four + E111, + # expected an indented block + E112, + # unexpected indentation + E113, + # indentation is not a multiple of four (comment) + E114, + # expected an indented block (comment) + E115, + # unexpected indentation (comment) + E116, + # continuation line under-indented for hanging indent + E121, + # continuation line missing indentation or outdented + E122, + # closing bracket does not match indentation of opening bracket’s line + E123, + # closing bracket does not match visual indentation + E124, + # continuation line with same indent as next logical line + E125, + # continuation line over-indented for hanging indent + E126, + # continuation line over-indented for visual indent; is harmless + # (over-indent is visually unambiguous) and currently generates too + # many warnings for existing code. + E127, + + # continuation line under-indented for visual indent + E128, + # visually indented line with same indent as next logical line + E129, + # continuation line unaligned for hanging indent + E131, + # closing bracket is missing indentation + E133, + # whitespace after ‘(‘ + E201, + # whitespace before ‘)’ + E202, + # whitespace before ‘:’; this warning is invalid for slices + E203, + # whitespace before ‘(‘ + E211, + # multiple spaces before operator + E221, + # multiple spaces after operator + E222, + # tab before operator + E223, + # tab after operator + E224, + # missing whitespace around operator + E225, + # missing whitespace around arithmetic operator + E226, + # missing whitespace around bitwise or shift operator + E227, + # missing whitespace around modulo operator + E228, + # missing whitespace after ‘,’, ‘;’, or ‘:’ + E231, + # multiple spaces after ‘,’ + E241, + # tab after ‘,’ + E242, + # unexpected spaces around keyword / parameter equals + E251, + # at least two spaces before inline comment + E261, + # inline comment should start with ‘# ‘ + E262, + # block comment should start with ‘# ‘ + E265, + # too many 
leading ‘#’ for block comment + E266, + # multiple spaces after keyword + E271, + # multiple spaces before keyword + E272, + # tab after keyword + E273, + # tab before keyword + E274, + # missing whitespace after keyword + E275, + # expected 1 blank line, found 0 + E301, + # expected 2 blank lines, found 0 + E302, + # too many blank lines (3) + E303, + # blank lines found after function decorator + E304, + # expected 2 blank lines after end of function or class + E305, + # expected 1 blank line before a nested definition + E306, + # multiple imports on one line + E401, + # line too long (> 79 characters) + E501, + # the backslash is redundant between brackets + E502, + # multiple statements on one line (colon) + E701, + # multiple statements on one line (semicolon) + E702, + # statement ends with a semicolon + E703, + # multiple statements on one line (def) + E704, # These are pycodestyle lints that black doesn't catch: # E711, # comparison to None should be ‘if cond is None:’ # E712, # comparison to True should be ‘if cond is True:’ or ‘if cond:’ @@ -78,16 +135,25 @@ ignore = # I think these are internal to pycodestyle? 
# E901, # SyntaxError or IndentationError # E902, # IOError - F811, # isn't aware of type-only imports, results in false-positives - W191, # indentation contains tabs - W291, # trailing whitespace - W292, # no newline at end of file - W293, # blank line contains whitespace - W391, # blank line at end of file - W503, # line break before binary operator; binary operator in a new line is - # the standard - W504, # line break after binary operator - W505, # not part of PEP8; doc line too long (> 79 characters) + # isn't aware of type-only imports, results in false-positives + F811, + # indentation contains tabs + W191, + # trailing whitespace + W291, + # no newline at end of file + W292, + # blank line contains whitespace + W293, + # blank line at end of file + W391, + # line break before binary operator; binary operator in a new line is + # the standard + W503, + # line break after binary operator + W504, + # not part of PEP8; doc line too long (> 79 characters) + W505, # These are pycodestyle lints that black doesn't catch: # W601, # .has_key() is deprecated, use ‘in’ # W602, # deprecated form of raising exception @@ -106,6 +172,7 @@ exclude = .pyre, __pycache__, .tox, + native, max-complexity = 12 diff --git a/.github/build-matrix.json b/.github/build-matrix.json new file mode 100644 index 00000000..3a1db7b3 --- /dev/null +++ b/.github/build-matrix.json @@ -0,0 +1,31 @@ +[ + { + "vers": "x86_64", + "os": "ubuntu-20.04" + }, + { + "vers": "i686", + "os": "ubuntu-20.04" + }, + { + "vers": "arm64", + "os": "macos-latest" + }, + { + "vers": "auto64", + "os": "macos-latest" + }, + { + "vers": "auto64", + "os": "windows-2019" + }, + { + "vers": "aarch64", + "os": [ + "self-hosted", + "linux", + "ARM64" + ], + "on_ref_regex": "^refs/(heads/main|tags/.*)$" + } +] \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..40738c8d --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,18 @@ +# 
https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + + - package-ecosystem: cargo + directory: "/native" + schedule: + interval: weekly + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..0df65636 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,45 @@ +name: build +on: + workflow_call: + +jobs: + # Build python wheels + build: + name: Build wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: + [ + macos-latest, + ubuntu-latest, + ubuntu-24.04-arm, + windows-latest, + windows-11-arm, + ] + env: + SCCACHE_VERSION: 0.2.13 + GITHUB_WORKSPACE: "${{github.workspace}}" + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false + - uses: actions/setup-python@v6 + with: + python-version: "3.12" + - uses: dtolnay/rust-toolchain@stable + - name: Disable scmtools local scheme + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + run: >- + echo LIBCST_NO_LOCAL_SCHEME=1 >> $GITHUB_ENV + - name: Enable building wheels for pre-release CPython versions + if: github.event_name != 'release' + run: echo CIBW_ENABLE=cpython-prerelease >> $GITHUB_ENV + - name: Build wheels + uses: pypa/cibuildwheel@v3.2.1 + - uses: actions/upload-artifact@v4 + with: + path: wheelhouse/*.whl + name: wheels-${{matrix.os}} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..dd3665ad --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,142 @@ +name: CI + +on: + push: + branches: + - main + pull_request: + +permissions: {} + +jobs: + test: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: 
[macos-latest, ubuntu-latest, windows-latest] + python-version: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + - "3.13" + - "3.13t" + - "3.14" + - "3.14t" + steps: + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + version: "0.7.13" + python-version: ${{ matrix.python-version }} + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false + - uses: dtolnay/rust-toolchain@stable + - name: Build LibCST + run: uv sync --locked --dev + - name: Native Parser Tests + run: uv run poe test + - name: Coverage + run: uv run coverage report + + # Run linters + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + version: "0.7.13" + python-version: "3.10" + - run: uv run poe lint + - run: uv run poe fixtures + + # Run pyre typechecker + typecheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + version: "0.7.13" + python-version: "3.10" + - run: uv run poe typecheck + + # Build the docs + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + version: "0.7.13" + python-version: "3.10" + - uses: ts-graphviz/setup-graphviz@v2 + - run: uv run --group docs poe docs + - name: Archive Docs + uses: actions/upload-artifact@v4 + with: + name: sphinx-docs + path: docs/build + + # Test rust parts + native: + name: Rust unit tests + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ["3.10", "3.13t"] + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + - uses: actions/setup-python@v6 + 
with: + python-version: ${{ matrix.python-version }} + - name: test + run: cargo test --manifest-path=native/Cargo.toml --release + - name: test without python + if: matrix.os == 'ubuntu-latest' + run: cargo test --manifest-path=native/Cargo.toml --release --no-default-features + - name: clippy + run: cargo clippy --manifest-path=native/Cargo.toml --all-targets --all-features + - name: compile-benchmarks + run: cargo bench --manifest-path=native/Cargo.toml --no-run + + rustfmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - run: rustup component add rustfmt + - name: format + run: cargo fmt --all --manifest-path=native/Cargo.toml -- --check + build: + # only trigger here for pull requests - regular pushes are handled in pypi_upload + if: ${{ github.event_name == 'pull_request' }} + uses: Instagram/LibCST/.github/workflows/build.yml@main diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml new file mode 100644 index 00000000..04434a24 --- /dev/null +++ b/.github/workflows/pypi_upload.yml @@ -0,0 +1,60 @@ +name: pypi_upload + +on: + release: + types: [published] + push: + branches: [main] + +permissions: + contents: read + +jobs: + build: + uses: Instagram/LibCST/.github/workflows/build.yml@main + upload_release: + name: Upload wheels to pypi + runs-on: ubuntu-latest + needs: build + permissions: + id-token: write + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + persist-credentials: false + - name: Download binary wheels + id: download + uses: actions/download-artifact@v5 + with: + pattern: wheels-* + path: wheelhouse + merge-multiple: true + - uses: actions/setup-python@v6 + with: + python-version: "3.10" + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + version: "0.7.13" + enable-cache: false + - name: Build a source tarball + env: + LIBCST_NO_LOCAL_SCHEME: 1 + 
OUTDIR: ${{ steps.download.outputs.download-path }} + run: >- + uv run python -m + build + --sdist + --outdir "$OUTDIR" + - name: Publish distribution 📦 to Test PyPI + if: github.event_name == 'push' + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + packages-dir: ${{ steps.download.outputs.download-path }} + - name: Publish distribution 📦 to PyPI + if: github.event_name == 'release' + uses: pypa/gh-action-pypi-publish@release/v1 + with: + packages-dir: ${{ steps.download.outputs.download-path }} diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 00000000..47fdfe00 --- /dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,35 @@ +name: GitHub Actions Security Analysis with zizmor 🌈 + +on: + push: + branches: ["main"] + pull_request: + branches: ["**"] + +jobs: + zizmor: + name: zizmor latest via PyPI + runs-on: ubuntu-latest + permissions: + security-events: write + contents: read + actions: read + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Install the latest version of uv + uses: astral-sh/setup-uv@v7 + + - name: Run zizmor 🌈 + run: uvx zizmor --format sarif . 
> results.sarif + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: results.sarif + category: zizmor \ No newline at end of file diff --git a/.gitignore b/.gitignore index 85fb5573..004ebb4c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,11 @@ *.swp *.swo *.pyc +*.pyd *.pyo +*.so *.egg-info/ +.eggs/ .pyre/ __pycache__/ .tox/ @@ -10,6 +13,11 @@ docs/build/ dist/ docs/source/.ipynb_checkpoints/ build/ +libcst/_version.py .coverage .hypothesis/ -.pyre_configuration +.python-version +target/ +venv/ +.venv/ +.idea/ diff --git a/.pyre_configuration b/.pyre_configuration new file mode 100644 index 00000000..cf108076 --- /dev/null +++ b/.pyre_configuration @@ -0,0 +1,16 @@ +{ + "exclude": [ + ".*\/native\/.*" + ], + "ignore_all_errors": [ + ".venv" + ], + "source_directories": [ + "." + ], + "search_path": [ + "stubs", {"site-package": "setuptools_rust"} + ], + "workers": 3, + "strict": true +} diff --git a/.pyre_configuration.example b/.pyre_configuration.example deleted file mode 100644 index 784a7958..00000000 --- a/.pyre_configuration.example +++ /dev/null @@ -1,12 +0,0 @@ -{ - "source_directories": [ - "." - ], - "search_path": [ - "stubs" - ], - "exclude": [ - ".*/\\.tox/.*" - ], - "strict": true -} diff --git a/.readthedocs.yml b/.readthedocs.yml index c76ca987..bb6eb608 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -5,12 +5,18 @@ sphinx: formats: all +build: + os: ubuntu-20.04 + tools: + python: "3" + rust: "1.70" + apt_packages: + - graphviz + python: - version: 3.7 install: - - requirements: requirements.txt - - requirements: requirements-dev.txt - method: pip path: . 
- system_packages: true + extra_requirements: + - dev diff --git a/CHANGELOG.md b/CHANGELOG.md index dd0d1673..f72d53f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,412 +1,1071 @@ +# 1.8.6 - 2025-11-03 + +## What's Changed +* Update pyproject.toml for 3.14t by @itamaro in https://github.com/Instagram/LibCST/pull/1417 +* Update PyO3 to 0.26 by @cjwatson in https://github.com/Instagram/LibCST/pull/1413 +* Make CodemodCommand's supported_transforms order deterministic by @frvnkliu in https://github.com/Instagram/LibCST/pull/1424 + +## New Contributors +* @cjwatson made their first contribution in https://github.com/Instagram/LibCST/pull/1413 +* @frvnkliu made their first contribution in https://github.com/Instagram/LibCST/pull/1424 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.8.5...v1.8.6 + +# 1.8.5 - 2025-09-25 + +## What's Changed +* fixed: circular import error by @drinkmorewaterr in https://github.com/Instagram/LibCST/pull/1406 + + +# 1.8.4 - 2025-09-09 + +## What's Changed +* fixed: generate Attribute nodes when applying type annotations by @tungol in https://github.com/Instagram/LibCST/pull/1396 +* added: Support parsing of t-strings #1374 by @drinkmorewaterr in https://github.com/Instagram/LibCST/pull/1398 +* added: add support for PEP758 by @drinkmorewaterr in https://github.com/Instagram/LibCST/pull/1401 + +## New Contributors +* @tungol made their first contribution in https://github.com/Instagram/LibCST/pull/1396 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.8.2...v1.8.4 + +# 1.8.3 - 2025-08-29 +## What's Changed +* removed: remove entry points to pure parser by @drinkmorewaterr in https://github.com/Instagram/LibCST/pull/1375 +* fixed: fixes match statements to work with PositionProvider by @imsut in https://github.com/Instagram/LibCST/pull/1389 + + +## New Contributors +* @hunterhogan made their first contribution in https://github.com/Instagram/LibCST/pull/1378 +* @thomas-serre-sonarsource made their 
first contribution in https://github.com/Instagram/LibCST/pull/1379 +* @imsut made their first contribution in https://github.com/Instagram/LibCST/pull/1389 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.8.2...v1.8.3 + +# 1.8.2 - 2025-06-13 + +# Fixed +* fix(dependency): add back typing-extensions for 3.9 by @Lee-W in https://github.com/Instagram/LibCST/pull/1358 + +## New Contributors +* @Lee-W made their first contribution in https://github.com/Instagram/LibCST/pull/1358 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.8.1...v1.8.2 + +# 1.8.1 - 2025-06-10 + +## Added +* add helper to convert nodes to matchers by @zsol in https://github.com/Instagram/LibCST/pull/1351 + +## Updated +* Avoid raising bare Exception by @zaicruvoir1rominet in https://github.com/Instagram/LibCST/pull/1168 +* Upgrade PyYAML-ft version and use new module name by @lysnikolaou in https://github.com/Instagram/LibCST/pull/1353 + +## New Contributors +* @lysnikolaou made their first contribution in https://github.com/Instagram/LibCST/pull/1353 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.8.0...v1.8.1 + +# 1.8.0 - 2025-05-27 + +## Added +* Allow configuring empty formatter lists in codemod CLI by @ngoldbaum in https://github.com/Instagram/LibCST/pull/1319 +* Publish several new binary wheels + * macos intel by @hadialqattan in https://github.com/Instagram/LibCST/pull/1316 + * windows arm64 by @zsol in https://github.com/Instagram/LibCST/pull/1304 + * 3.13 CPython free-threaded by @zsol in https://github.com/Instagram/LibCST/pull/1333 + * (only on [test.pypi.org](https://test.pypi.org/project/libcst/#history)) 3.14 and 3.14 CPython free-threaded by @amyreese and @zsol in https://github.com/Instagram/LibCST/pull/1345 and https://github.com/Instagram/LibCST/pull/1331 +* Enable support for free-threaded CPython by @zsol in https://github.com/Instagram/LibCST/pull/1295 and https://github.com/Instagram/LibCST/pull/1335 + +## Updated 
+* update pyo3 to 0.25 by @ngoldbaum in https://github.com/Instagram/LibCST/pull/1324 +* Replace multiprocessing with ProcessPoolExecutor by @zsol in https://github.com/Instagram/LibCST/pull/1294 +* Support pipe syntax for Union types in codegen by @zsol in https://github.com/Instagram/LibCST/pull/1336 + +## New Contributors +* @hadialqattan made their first contribution in https://github.com/Instagram/LibCST/pull/1316 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.7.0...v1.8.0 + +# 1.7.0 - 2025-03-13 + +## Added +* add free-threaded CI by @ngoldbaum in https://github.com/Instagram/LibCST/pull/1312 + +## Updated +* Remove dependency on `chic` and upgrade `annotate-snippets` by @zanieb in https://github.com/Instagram/LibCST/pull/1293 +* Update for Pyo3 0.23 by @ngoldbaum in https://github.com/Instagram/LibCST/pull/1289 +* Bump PyO3 to 0.23.5 by @mgorny in https://github.com/Instagram/LibCST/pull/1311 + +## New Contributors +* @zanieb made their first contribution in https://github.com/Instagram/LibCST/pull/1293 +* @ngoldbaum made their first contribution in https://github.com/Instagram/LibCST/pull/1289 +* @mgorny made their first contribution in https://github.com/Instagram/LibCST/pull/1311 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.6.0...v1.7.0 + +# 1.6.0 - 2025-01-09 + +## Fixed + +* rename: store state in scratch by @zsol in https://github.com/Instagram/LibCST/pull/1250 +* rename: handle imports via a parent module by @zsol in https://github.com/Instagram/LibCST/pull/1251 +* rename: Fix imports with aliases by @zsol in https://github.com/Instagram/LibCST/pull/1252 +* rename: don't leave trailing commas by @zsol in https://github.com/Instagram/LibCST/pull/1254 +* rename: don't eat commas unnecessarily by @zsol in https://github.com/Instagram/LibCST/pull/1256 +* rename: fix renaming toplevel names by @zsol in https://github.com/Instagram/LibCST/pull/1260 +* bump 3.12 to 3.13 in readme by @khameeteman in 
https://github.com/Instagram/LibCST/pull/1228 + +## Added + +* Add codemod to convert `typing.Union` to `|` by @yangdanny97 in https://github.com/Instagram/LibCST/pull/1270 +* Add codemod to fix variadic callable annotations by @yangdanny97 in https://github.com/Instagram/LibCST/pull/1269 +* Add codemod to rename typing aliases of builtins by @yangdanny97 in https://github.com/Instagram/LibCST/pull/1267 +* Add typing classifier to pyproject.toml and badge to README by @yangdanny97 in https://github.com/Instagram/LibCST/pull/1272 +* Expose TypeAlias and TypeVar related structs in rust library by @Crozzers in https://github.com/Instagram/LibCST/pull/1274 + +## Updated +* Upgrade pyo3 to 0.22 by @jelmer in https://github.com/Instagram/LibCST/pull/1180 + +## New Contributors +* @yangdanny97 made their first contribution in https://github.com/Instagram/LibCST/pull/1270 +* @Crozzers made their first contribution in https://github.com/Instagram/LibCST/pull/1274 +* @jelmer made their first contribution in https://github.com/Instagram/LibCST/pull/1180 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.5.1...v1.6.0 + +# 1.5.1 - 2024-11-18 + +## Added + +* build wheels for musllinux by @MrMino in https://github.com/Instagram/LibCST/pull/1243 + +## New Contributors +* @MrMino made their first contribution in https://github.com/Instagram/LibCST/pull/1243 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.5.0...v1.5.1 + +# 1.5.0 - 2024-10-10 + +## Added +* FullyQualifiedNameProvider: Optionally consider pyproject.toml files when determining a file's module name and package by @camillol in https://github.com/Instagram/LibCST/pull/1148 +* Add validation for If node by @kiri11 in https://github.com/Instagram/LibCST/pull/1177 +* include python 3.13 in build by @khameeteman in https://github.com/Instagram/LibCST/pull/1203 + +## Fixed +* fix various Match statement visitation errors by @zsol in https://github.com/Instagram/LibCST/pull/1161 +* 
Mention codemod -x flag in docs by @kiri11 in https://github.com/Instagram/LibCST/pull/1169 +* Clear warnings for each file in codemod cli by @kiri11 in https://github.com/Instagram/LibCST/pull/1184 +* Typo fix in codemods_tutorial.rst (trivial) by @wimglenn in https://github.com/Instagram/LibCST/pull/1208 +* fix certain matchers breaking under multiprocessing by initializing them late by @kiri11 in https://github.com/Instagram/LibCST/pull/1204 + +## Updated +* make libcst_native::tokenizer public by @zsol in https://github.com/Instagram/LibCST/pull/1182 +* Use `license` instead of `license-file` by @michel-slm in https://github.com/Instagram/LibCST/pull/1189 +* Drop codecov from CI and readme by @amyreese in https://github.com/Instagram/LibCST/pull/1192 + + +## New Contributors +* @kiri11 made their first contribution in https://github.com/Instagram/LibCST/pull/1169 +* @grievejia made their first contribution in https://github.com/Instagram/LibCST/pull/1174 +* @michel-slm made their first contribution in https://github.com/Instagram/LibCST/pull/1189 +* @wimglenn made their first contribution in https://github.com/Instagram/LibCST/pull/1208 +* @khameeteman made their first contribution in https://github.com/Instagram/LibCST/pull/1203 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.4.0...v1.5.0 + +# 1.4.0 - 2024-05-22 + +## Fixed +* Fix Literal parse error in RemoveImportsVisitor by @camillol in https://github.com/Instagram/LibCST/pull/1130 +* Don't reset context.scratch between files by @zsol in https://github.com/Instagram/LibCST/pull/1151 +* Various documentation fixes + * Typo fix FullRepoManager by @kit1980 in https://github.com/Instagram/LibCST/pull/1138 + * ✏️ Fix tiny typo in `docs/source/metadata.rst` by @tiangolo in https://github.com/Instagram/LibCST/pull/1134 + * ✏️ Fix typo in `docs/source/scope_tutorial.ipynb` by @tiangolo in https://github.com/Instagram/LibCST/pull/1135 + * Update CONTRIBUTING.md by @zaicruvoir1rominet in 
https://github.com/Instagram/LibCST/pull/1142 + +## Added + +* Add helper functions for common ways of filtering nodes by @zaicruvoir1rominet in https://github.com/Instagram/LibCST/pull/1137 +* Dump CST to .dot (graphviz) files by @zaicruvoir1rominet in https://github.com/Instagram/LibCST/pull/1147 +* Implement PEP-696 by @thereversiblewheel in https://github.com/Instagram/LibCST/pull/1141 + +## New Contributors +* @tiangolo made their first contribution in https://github.com/Instagram/LibCST/pull/1134 +* @camillol made their first contribution in https://github.com/Instagram/LibCST/pull/1130 +* @zaicruvoir1rominet made their first contribution in https://github.com/Instagram/LibCST/pull/1142 +* @thereversiblewheel made their first contribution in https://github.com/Instagram/LibCST/pull/1141 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.3.1...v1.4.0 + +# 1.3.1 - 2024-04-03 + +## Fixed +* ImportError due to missing `mypy_extensions` dependency by @zsol in https://github.com/Instagram/LibCST/pull/1128 + +# 1.3.0 - 2024-04-03 + +## Updated +* Removed dependencies on `typing_extensions` and `typing_inspect` by @zsol in https://github.com/Instagram/LibCST/pull/1126 + +# 1.2.0 - 2024-02-19 + +## Updated +* Support running LibCST on Python 3.12 and drop support for running it on 3.8 + * remove 3.8 support by @zsol in https://github.com/Instagram/LibCST/pull/1073 + * Remove reference to distutils by @zsol in https://github.com/Instagram/LibCST/pull/1099 + * Update pyproject.toml for Python 3.12 support by @itamaro in https://github.com/Instagram/LibCST/pull/1038 + +## Added +* Allow `Element::codegen` to be used by external users by @Wilfred in https://github.com/Instagram/LibCST/pull/1071 + +## Fixed +* Fix parsing list matchers without explicit brackets by @zsol in https://github.com/Instagram/LibCST/pull/1097 +* installing rustc/cargo for mybinder demo by @aleivag in https://github.com/Instagram/LibCST/pull/1083 +* fix filepathprovider generic 
type by @kinto0 in https://github.com/Instagram/LibCST/pull/1036 + +## New Contributors +* @itamaro made their first contribution in https://github.com/Instagram/LibCST/pull/1039 +* @kinto0 made their first contribution in https://github.com/Instagram/LibCST/pull/1036 +* @dtolnay made their first contribution in https://github.com/Instagram/LibCST/pull/1063 +* @anonymousdouble made their first contribution in https://github.com/Instagram/LibCST/pull/1082 +* @aleivag made their first contribution in https://github.com/Instagram/LibCST/pull/1083 +* @Wilfred made their first contribution in https://github.com/Instagram/LibCST/pull/1071 +* @diliop made their first contribution in https://github.com/Instagram/LibCST/pull/1106 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.1.0...v1.2.0 + +# 1.1.0 - 2023-10-05 + +## Added +* PEP 695 support + * parser: PEP 695 - Type Parameter Syntax #1004 + * Scope provider: support for type annotations #1014 +* PEP 701 support + * parser: support arbitrarily nested f-strings #1026 + * parser: Parse multiline expressions in f-strings #1027 +* parser: Support files with mixed newlines #1007 +* [libcst](https://crates.io/crates/libcst) is now published to crates.io + +## Fixed +* codemod/ApplyTypeAnnotationsVisitor: Do not annotate the same variable multiple times #956 +* parser: Don't swallow trailing whitespace #976 +* codemod/rename: Avoid duplicating import statements when the module name doesn't change #981 + +## Updated +* cli: Don't gather dirs ending .py #994 +* drop support for Python 3.7 #997 +* A few parser performance improvements: + * Switch to using thread_local regular expressions to stop mutext contention #996 + * Remove need for regex in TextPosition::matches #1002 + * Remove Regexes from whitespace parser #1008 + +# 1.0.1 - 2023-06-07 + +## Fixed +* Fix type of `evaluated_value` on string to allow bytes by @ljodal in https://github.com/Instagram/LibCST/pull/721 +* Fix Sentinal typo by @kit1980 in 
https://github.com/Instagram/LibCST/pull/948 +* Allow no whitespace after lambda body in certain cases by @zsol in https://github.com/Instagram/LibCST/pull/939 +* Fix whitespace, fstring, walrus related parse errors (#939, #938, #937, +#936, #935, #934, #933, #932, #931) by @zsol in https://github.com/Instagram/LibCST/pull/940 +* Codemod CLI: Print diff only when there is a change by @kit1980 in https://github.com/Instagram/LibCST/pull/945 + +## New Contributors +* @ljodal made their first contribution in https://github.com/Instagram/LibCST/pull/721 +* @kit1980 made their first contribution in https://github.com/Instagram/LibCST/pull/948 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v1.0.0...v1.0.1 + +# 1.0.0 - 2023-05-25 + +The first major release of LibCST is essentially the same as 0.4.10, but using the +newer, Rust-based parser implementation by default. The old, pure Python parser is +scheduled for removal in the next (non-patch) release. Until then, it is available with +the `LIBCST_PARSER_TYPE` environment variable set to `pure`. 
+ +## Updated + +* Switch the default parser implementation to native by @zsol in https://github.com/Instagram/LibCST/pull/929 + +# 0.4.10 - 2023-05-23 + +## New Contributors +* @and-semakin made their first contribution in https://github.com/Instagram/LibCST/pull/816 +* @carljm made their first contribution in https://github.com/Instagram/LibCST/pull/828 +* @sagarbadiyani made their first contribution in https://github.com/Instagram/LibCST/pull/841 +* @podtserkovskiy made their first contribution in https://github.com/Instagram/LibCST/pull/894 +* @rchen152 made their first contribution in https://github.com/Instagram/LibCST/pull/903 +* @Kludex made their first contribution in https://github.com/Instagram/LibCST/pull/913 +* @jakkdl made their first contribution in https://github.com/Instagram/LibCST/pull/921 + +## Added +* Add py3.11 classifier by @and-semakin in https://github.com/Instagram/LibCST/pull/816 +* Script to regenerate test fixtures, upgrade to Pyre 0.9.10 by @amyreese in https://github.com/Instagram/LibCST/pull/872 +* Allow FullyQualifiedNameProvider to work with absolute paths by @amyreese in https://github.com/Instagram/LibCST/pull/867 +* Allow running codemods without configuring in YAML by @akx in https://github.com/Instagram/LibCST/pull/879 +* Support PEP 604 in ApplyTypeAnnotationsVisitor by @hauntsaninja in https://github.com/Instagram/LibCST/pull/868 + +## Fixed +* fix PEP 604 union annotations in decorators by @carljm in https://github.com/Instagram/LibCST/pull/828 +* [AddImportsVisitor] Docstring Check Only for the Top Element of the Body by @sagarbadiyani in https://github.com/Instagram/LibCST/pull/841 +* Fix [#855](https://github.com/Instagram/LibCST/issues/855) - fail to parse with statement by @stroxler in https://github.com/Instagram/LibCST/pull/861 +* Add setuptools-rust to build requirements in setup.py by @amyreese in https://github.com/Instagram/LibCST/pull/873 +* Relative imports from '' package are not allowed by @podtserkovskiy in 
https://github.com/Instagram/LibCST/pull/894 +* Use subprocess.DEVNULL instead of opening os.devnull by hand by @akx in https://github.com/Instagram/LibCST/pull/897 +* Ensure current Python interpreter is used for subprocesses by @akx in https://github.com/Instagram/LibCST/pull/898 +* Fix ApplyTypeAnnotationsVisitor behavior on attribute assignments. by @rchen152 in https://github.com/Instagram/LibCST/pull/903 +* Fix spelling and grammar in some comments by @stroxler in https://github.com/Instagram/LibCST/pull/908 +* skip escaped backslash in rf-string by @jakkdl in https://github.com/Instagram/LibCST/pull/921 +* relax validation rules on decorators by @jakkdl in https://github.com/Instagram/LibCST/pull/926 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v0.4.9...v0.4.10 + +# 0.4.9 - 2022-11-10 + +## Updated +* Bump setuptools-rust version by @zsol in https://github.com/Instagram/LibCST/pull/809 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v0.4.8...v0.4.9 + +# 0.4.8 - 2022-11-10 + +## New Contributors +* @dhruvmanila made their first contribution in https://github.com/Instagram/LibCST/pull/728 +* @vfazio made their first contribution in https://github.com/Instagram/LibCST/pull/801 +* @matthewshaer made their first contribution in https://github.com/Instagram/LibCST/pull/807 + + +## Fixed +* Fix parse error message for number parsing by @zzl0 in https://github.com/Instagram/LibCST/pull/724 +* Fix problematic doc build, due to the new builder image provided by readthedocs doesn't has the `graphviz-dev` package pre-installed any more by @MapleCCC in https://github.com/Instagram/LibCST/pull/751 +* Fix docstring of `FullRepoManager` by @MapleCCC in https://github.com/Instagram/LibCST/pull/750 +* Fix bug when `TypeOf` is one of options in `OneOf` / `AllOf` by @MapleCCC in https://github.com/Instagram/LibCST/pull/756 +* Tighten the metadata type of `ExpressionContextProvider` by @MapleCCC in 
https://github.com/Instagram/LibCST/pull/760 +* Fix the bug that the use of formatter in codemods has undetermined target Python version, resulting in hard-to-reason-with behavior by @MapleCCC in https://github.com/Instagram/LibCST/pull/771 + + +## Added +* Python 3.11 rutime support + * test using python 3.11 beta versions by @zsol in https://github.com/Instagram/LibCST/pull/723 + * Python 3.11 wheels by @vfazio in https://github.com/Instagram/LibCST/pull/801 +* Raise informative exception when metadata is unresolved in a metadata-based match by @MapleCCC in https://github.com/Instagram/LibCST/pull/757 +* Add AccessorProvider by @matthewshaer in https://github.com/Instagram/LibCST/pull/807 + +**Full Changelog**: https://github.com/Instagram/LibCST/compare/v0.4.7...v0.4.8 + +# 0.4.7 - 2022-07-12 + +## New Contributors +* @Chenguang-Zhu made their first contribution in https://github.com/Instagram/LibCST/pull/720 + +## Fixed +* Fix get_qualified_names_for matching on prefixes of the given name by @lpetre in https://github.com/Instagram/LibCST/pull/719 + +## Added +* Implement lazy loading mechanism for expensive metadata providers by @Chenguang-Zhu in https://github.com/Instagram/LibCST/pull/720 + +# 0.4.6 - 2022-07-04 + +## New Contributors +- @superbobry made their first contribution in https://github.com/Instagram/LibCST/pull/702 + +## Fixed +- convert_type_comments now preserves comments following type comments by @superbobry in https://github.com/Instagram/LibCST/pull/702 +- QualifiedNameProvider optimizations + - Cache the scope name prefix to prevent scope traversal in a tight loop by @lpetre in https://github.com/Instagram/LibCST/pull/708 + - Faster qualified name formatting by @lpetre in https://github.com/Instagram/LibCST/pull/710 + - Prevent unnecessary work in Scope.get_qualified_names_for_ by @lpetre in https://github.com/Instagram/LibCST/pull/709 +- Fix parsing of parenthesized empty tuples by @zsol in https://github.com/Instagram/LibCST/pull/712 +- 
Support whitespace after ParamSlash by @zsol in https://github.com/Instagram/LibCST/pull/713 +- [parser] bail on deeply nested expressions by @zsol in https://github.com/Instagram/LibCST/pull/718 + +# 0.4.5 - 2022-06-17 + +## New Contributors + +- @zzl0 made their first contribution in https://github.com/Instagram/LibCST/pull/704 + +## Fixed + +- Only skip supported escaped characters in f-strings by @zsol in https://github.com/Instagram/LibCST/pull/700 +- Escaping quote characters in raw string literals causes a tokenizer error by @zsol in https://github.com/Instagram/LibCST/issues/668 +- Corrected a code example in the documentation by @zzl0 in https://github.com/Instagram/LibCST/pull/703 +- Handle multiline strings that start with quotes by @zzl0 in https://github.com/Instagram/LibCST/pull/704 +- Fixed a performance regression in libcst.metadata.ScopeProvider by @lpetre in https://github.com/Instagram/LibCST/pull/698 + +# 0.4.4 - 2022-06-13 + +## New Contributors + +- @adamchainz made their first contribution in https://github.com/Instagram/LibCST/pull/688 + +## Added + +- Add package links to PyPI by @adamchainz in https://github.com/Instagram/LibCST/pull/688 +- native: add overall benchmark by @zsol in https://github.com/Instagram/LibCST/pull/692 +- Add support for PEP-646 by @zsol in https://github.com/Instagram/LibCST/pull/696 + +## Updated + +- parser: use references instead of smart pointers for Tokens by @zsol in https://github.com/Instagram/LibCST/pull/691 + +# 0.4.3 - 2022-05-11 + +## Fixed + +- Restore the 0.4.1 behavior for libcst.helpers.get_absolute_module by @lpetre in https://github.com/Instagram/LibCST/pull/684 + +# 0.4.2 - 2022-05-04 + +## New Contributors + +- @stanislavlevin made their first contribution in https://github.com/Instagram/LibCST/pull/650 +- @dmitryvinn made their first contribution in https://github.com/Instagram/LibCST/pull/655 +- @wiyr made their first contribution in https://github.com/Instagram/LibCST/pull/669 +- @toofar made 
their first contribution in https://github.com/Instagram/LibCST/pull/675 + +## Fixed + +- native: Avoid crashing by making IntoPy conversion fallible by @zsol in https://github.com/Instagram/LibCST/pull/639 +- native: make sure ParserError's line is zero-indexed by @zsol in https://github.com/Instagram/LibCST/pull/681 +- Fix space validation for AsName and Await by @zsol in https://github.com/Instagram/LibCST/pull/641 +- Qualified Name Provider: Fix returned qname for symbols that are prefixes of each other by @wiyr in https://github.com/Instagram/LibCST/pull/669 +- Rename Codemod: Correct last renamed import from by @toofar in https://github.com/Instagram/LibCST/pull/675 +- Many changes to the Apply Type Comments codemod: + - Allow for skipping quotes when applying type comments by @stroxler in https://github.com/Instagram/LibCST/pull/644 + - Port pyre fixes by @stroxler in https://github.com/Instagram/LibCST/pull/651 + - Preserve as-imports when merging type annotations. by @martindemello in https://github.com/Instagram/LibCST/pull/664 + - Qualify imported symbols when the dequalified form would cause a conflict by @martindemello in https://github.com/Instagram/LibCST/pull/674 + - Add an argument to always qualify imported type annotations. 
by @martindemello in https://github.com/Instagram/LibCST/pull/676 + +## Added + +- Create an AddTrailingCommas codemod by @stroxler in https://github.com/Instagram/LibCST/pull/643 +- Define gather global names visitor by @shannonzhu in https://github.com/Instagram/LibCST/pull/657 + +## Updated + +- Support module and package names in the codemod context by @lpetre in https://github.com/Instagram/LibCST/pull/662 +- Drop support for running libcst using a python 3.6 interpreter by @lpetre in https://github.com/Instagram/LibCST/pull/663 +- Update relative import logic to match cpython by @lpetre in https://github.com/Instagram/LibCST/pull/660 +- Scope Provider: Consider access information when computing qualified names for nodes by @lpetre in https://github.com/Instagram/LibCST/pull/682 + +# 0.4.1 - 2022-01-28 + +## New Contributors + +- @ariebovenberg made their first contribution in https://github.com/Instagram/LibCST/pull/605 +- @sehz made their first contribution in https://github.com/Instagram/LibCST/pull/598 + +## Added + +- Add docs about the native parts by @zsol in https://github.com/Instagram/LibCST/pull/601 +- Specify minimum rust toolchain version by @zsol in https://github.com/Instagram/LibCST/pull/614 +- build wheels on main branch for linux/arm64 by @zsol in https://github.com/Instagram/LibCST/pull/630 + +## Updated + +- ApplyTypeAnnotationVisitor changes + - Add support for methods with func type comment excluding self/cls by @stroxler in https://github.com/Instagram/LibCST/pull/622 + - Merge in TypeVars and Generic base classes in ApplyTypeAnnotationVisitor by @martindemello in https://github.com/Instagram/LibCST/pull/596 + - Full handling for applying type comments to Assign by @stroxler in https://github.com/Instagram/LibCST/pull/599 + - Add support for For and With by @stroxler in https://github.com/Instagram/LibCST/pull/607 + - Support FunctionDef transformations by @stroxler in https://github.com/Instagram/LibCST/pull/610 +- change pyo3 as 
optional dependency in native Python Parser by @sehz in https://github.com/Instagram/LibCST/pull/598 +- add slots to base classes, @add_slots takes bases into account by @ariebovenberg in https://github.com/Instagram/LibCST/pull/605 +- [native] Box most enums by @zsol in https://github.com/Instagram/LibCST/pull/632 +- [native] Return tuples instead of lists in CST nodes by @zsol in https://github.com/Instagram/LibCST/pull/631 + +## Fixed + +- Allow trailing whitespace without newline at EOF by @zsol in https://github.com/Instagram/LibCST/pull/611 +- Handle ast.parse failures when converting function type comments by @stroxler in https://github.com/Instagram/LibCST/pull/616 +- [native] Don't redundantly nest StarredElement inside another Element by @isidentical in https://github.com/Instagram/LibCST/pull/624 +- [native] Allow unparenthesized tuples inside f-strings by @isidentical in https://github.com/Instagram/LibCST/pull/621 +- Don't require whitespace right after match by @isidentical in https://github.com/Instagram/LibCST/pull/628 +- Proxy both parentheses in some pattern matching nodes by @isidentical in https://github.com/Instagram/LibCST/pull/626 + +# 0.4.0 - 2022-01-12 + +This release contains a new parsing infrastructure that is turned off by default. You +can enable it by setting the `LIBCST_PARSER_TYPE` environment variable to `native` +before parsing an input with the usual LibCST APIs. Parsing Python 3.10 documents is +only supported in this new mode. + +Note: the new parser is built as a native extension, so LibCST will ship with binary +wheels from now on. 
+ +## Added + +- Implement a Python PEG parser in Rust by @zsol in [#566](https://github.com/Instagram/LibCST/pull/566) +- implement PEP-654: except\* by @zsol in [#571](https://github.com/Instagram/LibCST/pull/571) +- Implement PEP-634 - Match statement by @zsol in [#568](https://github.com/Instagram/LibCST/pull/568) +- Add instructions to codegen test failures by @stroxler in [#582](https://github.com/Instagram/LibCST/pull/582) +- Support Parenthesized With Statements by @stroxler in [#584](https://github.com/Instagram/LibCST/pull/584) +- Support relative imports in AddImportsVisitor by @martindemello in [#585](https://github.com/Instagram/LibCST/pull/585) +- Codemod for PEP 484 Assign w / type comments -> PEP 526 AnnAssign by @stroxler in [#594](https://github.com/Instagram/LibCST/pull/594) + +## Updated + +- Update license headers by @zsol in [#560](https://github.com/Instagram/LibCST/pull/560) +- Use precise signature matching when inserting function type annotations by @martindemello in [#591](https://github.com/Instagram/LibCST/pull/591) + +# 0.3.23 - 2021-11-23 + +## Fixed + +- Fix missing string annotation references [#561](https://github.com/Instagram/LibCST/pull/561) + +# 0.3.22 - 2021-11-22 + +## Added + +- Add --indent-string option to `libcst.tool print` [#525](https://github.com/Instagram/LibCST/pull/525) +- Publish pre-release packages to test.pypi.org [#550](https://github.com/Instagram/LibCST/pull/550) +- Add ImportAssignment class extending Assignment to record assignments for import statements [#554](https://github.com/Instagram/LibCST/pull/554) + +## Fixed + +- Various documentation fixes [#527](https://github.com/Instagram/LibCST/pull/527), [#529](https://github.com/Instagram/LibCST/pull/529) +- Do not add imports if we added no type info in ApplyTypeAnnotationVisitor [(commit)](https://github.com/Instagram/LibCST/commit/87625d02b6cb321c9c29ba1c67d81ce954a1a396) +- Support relative imports in ApplyTypeAnnotationVisitor qualifier handling 
https://github.com/Instagram/LibCST/pull/538) +- Don't gather metadata if the wrapper already contains it [#545](https://github.com/Instagram/LibCST/pull/545) +- Swallow parsing errors in string annotations [#548](https://github.com/Instagram/LibCST/pull/548) +- Stop parsing string annotations when no longer in a typing call [#546](https://github.com/Instagram/LibCST/pull/546) + +## Updated + +- Move find_qualified_names_for in the Assignment class [#557](https://github.com/Instagram/LibCST/pull/557) + +# 0.3.21 - 2021-09-21 + +## Fixed + +- Fix pyre command for type inference provider [#523](https://github.com/Instagram/LibCST/pull/523) + +## Updated + +- Change codegen to treat typing.Union[Foo, NoneType] and typing.Optional[Foo] as the same [#508](https://github.com/Instagram/LibCST/pull/508) +- Rewrite the MatchIfTrue type to be generic on \_MatchIfTrueT [#512](https://github.com/Instagram/LibCST/pull/512) +- Add python3.9 to the CI [#506](https://github.com/Instagram/LibCST/pull/506) +- Various CI changes [#471](https://github.com/Instagram/LibCST/pull/471) [#510](https://github.com/Instagram/LibCST/pull/510) [#505](https://github.com/Instagram/LibCST/pull/505) [#515](https://github.com/Instagram/LibCST/pull/515) [#516](https://github.com/Instagram/LibCST/pull/516) + +# 0.3.20 - 2021-08-09 + +## Fixed + +- Don't reset subprocess environment to fix codemodding on windows [#495](https://github.com/Instagram/LibCST/pull/495) +- TypeAnnotationsVisitor: don't truncate function return type [#499](https://github.com/Instagram/LibCST/pull/499) +- Docs: Fix typo [#492](https://github.com/Instagram/LibCST/pull/492) + +# 0.3.19 - 2021-05-12 + +## Updated + +- Return more specific QNames for assignments [#477](https://github.com/Instagram/LibCST/pull/477) +- Tie accesses from string annotation to the string node [#483](https://github.com/Instagram/LibCST/pull/483) + +## Fixed + +- Fix leaking processes from TypeInferenceProvider 
[#474](https://github.com/Instagram/LibCST/pull/474) +- Fix TypeInferenceProvider breakage with empty cache [#476](https://github.com/Instagram/LibCST/pull/476) +- Fix formatting for link to QualifiedName class in docs [#480](https://github.com/Instagram/LibCST/pull/480) + +# 0.3.18 - 2021-03-29 + +## Added + +- Add FlattenSentinel to support replacing a statement with multiple statements [#455](https://github.com/Instagram/LibCST/pull/455) +- Add BuiltinScope [#469](https://github.com/Instagram/LibCST/pull/469) +- Add FullyQualifiedNameProvider [#465](https://github.com/Instagram/LibCST/pull/465) + +## Updated + +- Split QualifiedNameProvider out from libcst.metadata.scope_provider [#464](https://github.com/Instagram/LibCST/pull/464) + +## Fixed + +- Exception while parsing escape character in raw f-strings [#462](https://github.com/Instagram/LibCST/issues/462) + +# 0.3.17 - 2021-02-08 + +## Updated + +- Optimization: reduce the number of unused parallel processes [#440](https://github.com/Instagram/LibCST/pull/440) + +## Fixed + +- Walrus operator's left hand side now has STORE expression context [#443](https://github.com/Instagram/LibCST/pull/443) +- ApplyTypeAnnotationsVisitor applies parameter annotations even if no return type is declared [#445](https://github.com/Instagram/LibCST/pull/445) +- Work around Windows problem by using dummy pool for `jobs=1` [#436](https://github.com/Instagram/LibCST/pull/436) +- Remove extra unused imports added in other files [#453](https://github.com/Instagram/LibCST/pull/453) + +# 0.3.16 - 2020-12-16 + +## Added + +- Support PEP-604 style unions in decorator annotations [#429](https://github.com/Instagram/LibCST/pull/429) +- Gathering exports in augmented assignment statements [#426](https://github.com/Instagram/LibCST/pull/426) + +## Fixed + +- Don't allow out of order accesses in the global scope [#431](https://github.com/Instagram/LibCST/pull/431) +- Handle scope ordering in For statements 
[#430](https://github.com/Instagram/LibCST/pull/430) +- Fix for not parsing subscripts such as `cast()["from"]` [#428](https://github.com/Instagram/LibCST/pull/428) +- Walrus operator's left hand side now has STORE expression context [#433](https://github.com/Instagram/LibCST/pull/433) + +# 0.3.15 - 2020-12-01 + +## Added + +- Support Named Unicode Characters and yield in f-strings [#424](https://github.com/Instagram/LibCST/pull/424) + +## Fixed + +- Assignment/access ordering in comprehensions [#423](https://github.com/Instagram/LibCST/pull/423) +- Referencing of remaining objects in cast() [#422](https://github.com/Instagram/LibCST/pull/422) + # 0.3.14 - 2020-11-18 ## Fixed -- Fix is_annotation for types used in classdef base and assign value [#406](https://github.com/Instagram/LibCST/pull/406) -- Visit concatenated f-strings during scope analysis [#411](https://github.com/Instagram/LibCST/pull/411) -- Correct handling of walrus operator in function args [#417](https://github.com/Instagram/LibCST/pull/417) -- Allow generator expressions in f-strings [#419](https://github.com/Instagram/LibCST/pull/419) -- Keep track of assignment/access ordering during scope analysis [#413](https://github.com/Instagram/LibCST/pull/413) -- Handle string type references in cast() during scope analysis [#418](https://github.com/Instagram/LibCST/pull/418) + +- Fix is_annotation for types used in classdef base and assign value [#406](https://github.com/Instagram/LibCST/pull/406) +- Visit concatenated f-strings during scope analysis [#411](https://github.com/Instagram/LibCST/pull/411) +- Correct handling of walrus operator in function args [#417](https://github.com/Instagram/LibCST/pull/417) +- Allow generator expressions in f-strings [#419](https://github.com/Instagram/LibCST/pull/419) +- Keep track of assignment/access ordering during scope analysis [#413](https://github.com/Instagram/LibCST/pull/413) +- Handle string type references in cast() during scope analysis 
[#418](https://github.com/Instagram/LibCST/pull/418) # 0.3.13 - 2020-10-12 ## Fixed -- Use correct type for AugAssign and AnnAssign target [#396](https://github.com/Instagram/LibCST/pull/396) -- Support string annotations for type aliases [#401](https://github.com/Instagram/LibCST/pull/401) + +- Use correct type for AugAssign and AnnAssign target [#396](https://github.com/Instagram/LibCST/pull/396) +- Support string annotations for type aliases [#401](https://github.com/Instagram/LibCST/pull/401) # 0.3.12 - 2020-10-01 ## Fixed -- fix RemoveImportsVisitor crash when ImportAlias is inserted without comma [#397](https://github.com/Instagram/LibCST/pull/397) -- Provide STORE for {Class,Function}Def.name in ExpressionContextProvider [#394](https://github.com/Instagram/LibCST/pull/394) + +- fix RemoveImportsVisitor crash when ImportAlias is inserted without comma [#397](https://github.com/Instagram/LibCST/pull/397) +- Provide STORE for {Class,Function}Def.name in ExpressionContextProvider [#394](https://github.com/Instagram/LibCST/pull/394) # 0.3.11 - 2020-09-29 ## Added -- Implement TypeOf matcher [#384](https://github.com/Instagram/LibCST/pull/384) + +- Implement TypeOf matcher [#384](https://github.com/Instagram/LibCST/pull/384) ## Updated -- Update return type of ParentNodeProvider to be CSTNode [#377](https://github.com/Instagram/LibCST/pull/377) -- Add source code links to each class/function [#378](https://github.com/Instagram/LibCST/pull/378) + +- Update return type of ParentNodeProvider to be CSTNode [#377](https://github.com/Instagram/LibCST/pull/377) +- Add source code links to each class/function [#378](https://github.com/Instagram/LibCST/pull/378) ## Fixed -- Removing an import alias with a trailing standalone comment should preserve the comment [#392](https://github.com/Instagram/LibCST/pull/392) + +- Removing an import alias with a trailing standalone comment should preserve the comment [#392](https://github.com/Instagram/LibCST/pull/392) # 0.3.10 - 
2020-09-17 ## Added -- Handle string annotations in ScopeProvider [#373](https://github.com/Instagram/LibCST/pull/373) -- Add is_annotation subtype for Access inreferences. [#372](https://github.com/Instagram/LibCST/pull/372) + +- Handle string annotations in ScopeProvider [#373](https://github.com/Instagram/LibCST/pull/373) +- Add is_annotation subtype for Access in references. [#372](https://github.com/Instagram/LibCST/pull/372) ## Updated -- Call pyre query with noninteractive logging [#371](https://github.com/Instagram/LibCST/pull/371) -- Replace matchers with explicit visitation in gatherers [#366](https://github.com/Instagram/LibCST/pull/366) -- Include missing test data in install [#365](https://github.com/Instagram/LibCST/pull/365) + +- Call pyre query with noninteractive logging [#371](https://github.com/Instagram/LibCST/pull/371) +- Replace matchers with explicit visitation in gatherers [#366](https://github.com/Instagram/LibCST/pull/366) +- Include missing test data in install [#365](https://github.com/Instagram/LibCST/pull/365) ## Fixed -- Spaces around walrus operator are not required [#368](https://github.com/Instagram/LibCST/pull/368) -- SaveMachedNode now matches with trailing empty wildcards [#356](https://github.com/Instagram/LibCST/pull/356) -- Correctly extract wildcard matchers [#355](https://github.com/Instagram/LibCST/pull/355) + +- Spaces around walrus operator are not required [#368](https://github.com/Instagram/LibCST/pull/368) +- SaveMatchedNode now matches with trailing empty wildcards [#356](https://github.com/Instagram/LibCST/pull/356) +- Correctly extract wildcard matchers [#355](https://github.com/Instagram/LibCST/pull/355) # 0.3.9 - 2020-09-07 ## Added - - Support string type annotations in RemoveUnusedImports [#353](https://github.com/Instagram/LibCST/pull/353) - - Add scope to ImportAlias [#350](https://github.com/Instagram/LibCST/pull/350) - - Add scope to ClassDef [#349](https://github.com/Instagram/LibCST/pull/349) + +- Support 
string type annotations in RemoveUnusedImports [#353](https://github.com/Instagram/LibCST/pull/353) +- Add scope to ImportAlias [#350](https://github.com/Instagram/LibCST/pull/350) +- Add scope to ClassDef [#349](https://github.com/Instagram/LibCST/pull/349) ## Fixed - - Fixed all pyre related errors [#360](https://github.com/Instagram/LibCST/pull/360) - - Fixed enclosing attribute for attributes in call arguments [#362](https://github.com/Instagram/LibCST/pull/362) + +- Fixed all pyre related errors [#360](https://github.com/Instagram/LibCST/pull/360) +- Fixed enclosing attribute for attributes in call arguments [#362](https://github.com/Instagram/LibCST/pull/362) # 0.3.8 - 2020-07-22 ## Added - - Handle type subscripts when applying annotations. [#335](https://github.com/Instagram/LibCST/pull/335) - - Added FullRepoManager `cache` property [#330](https://github.com/Instagram/LibCST/pull/330) - - Added optional args for tox commands [#327](https://github.com/Instagram/LibCST/pull/327) + +- Handle type subscripts when applying annotations. 
[#335](https://github.com/Instagram/LibCST/pull/335) +- Added FullRepoManager `cache` property [#330](https://github.com/Instagram/LibCST/pull/330) +- Added optional args for tox commands [#327](https://github.com/Instagram/LibCST/pull/327) ## Updated - - Only remove trailing comma if the last alias is removed [#334](https://github.com/Instagram/LibCST/pull/334) + +- Only remove trailing comma if the last alias is removed [#334](https://github.com/Instagram/LibCST/pull/334) ## Fixed - - Fixed inserting imports after module docstring [#343](https://github.com/Instagram/LibCST/pull/343) - - Fixed ParenthesizedWhitespace before params in FuncDef [#342](https://github.com/Instagram/LibCST/pull/342) - - Fixed validation for ImportAlias and Try statements [#340](https://github.com/Instagram/LibCST/pull/340) - - Fixed NotEqual position issue [#325](https://github.com/Instagram/LibCST/pull/325) - - Fixed minor typo in scope_provider.py [#324](https://github.com/Instagram/LibCST/pull/324) + +- Fixed inserting imports after module docstring [#343](https://github.com/Instagram/LibCST/pull/343) +- Fixed ParenthesizedWhitespace before params in FuncDef [#342](https://github.com/Instagram/LibCST/pull/342) +- Fixed validation for ImportAlias and Try statements [#340](https://github.com/Instagram/LibCST/pull/340) +- Fixed NotEqual position issue [#325](https://github.com/Instagram/LibCST/pull/325) +- Fixed minor typo in scope_provider.py [#324](https://github.com/Instagram/LibCST/pull/324) # 0.3.7 - 2020-06-24 ## Added - - Added `RenameCommand` to rename all instances of a local or imported object to a specified new name. [#308](https://github.com/Instagram/LibCST/pull/308) + +- Added `RenameCommand` to rename all instances of a local or imported object to a specified new name. [#308](https://github.com/Instagram/LibCST/pull/308) ## Updated - - Upgraded Codecov dev dependency to 2.1.4. [#311](https://github.com/Instagram/LibCST/pull/311) - - Enabled Pyre `strict` mode by default. 
[#313](https://github.com/Instagram/LibCST/pull/313) + +- Upgraded Codecov dev dependency to 2.1.4. [#311](https://github.com/Instagram/LibCST/pull/311) +- Enabled Pyre `strict` mode by default. [#313](https://github.com/Instagram/LibCST/pull/313) ## Fixed - - Fixed `ImportError` under Python 3.9. [#306](https://github.com/Instagram/LibCST/pull/306) - - Fixed `stdout` being plugged into successfully codemod-ed files. [#309](https://github.com/Instagram/LibCST/pull/309) - - Fixed `QualifiedName` retrieval for names with repeated substrings. [#312](https://github.com/Instagram/LibCST/pull/312) - - Fixed default values of keyword-only and positional-only arguments in `ApplyTypeAnnotationsVisitor`. [#314](https://github.com/Instagram/LibCST/pull/314) - - Fixed `ExpressionContextProvider` by giving subscript values a `LOAD`context. [#319](https://github.com/Instagram/LibCST/pull/319) + +- Fixed `ImportError` under Python 3.9. [#306](https://github.com/Instagram/LibCST/pull/306) +- Fixed `stdout` being plugged into successfully codemod-ed files. [#309](https://github.com/Instagram/LibCST/pull/309) +- Fixed `QualifiedName` retrieval for names with repeated substrings. [#312](https://github.com/Instagram/LibCST/pull/312) +- Fixed default values of keyword-only and positional-only arguments in `ApplyTypeAnnotationsVisitor`. [#314](https://github.com/Instagram/LibCST/pull/314) +- Fixed `ExpressionContextProvider` by giving subscript values a `LOAD`context. [#319](https://github.com/Instagram/LibCST/pull/319) # 0.3.6 - 2020-05-27 ## Added - - Added `ConvertNamedTupleToDataclassCommand` to convert `NamedTuple` class declarations to Python 3.7 `dataclasses` using the `@dataclass(frozen=True)` decorator. [#299](https://github.com/Instagram/LibCST/pull/299) + +- Added `ConvertNamedTupleToDataclassCommand` to convert `NamedTuple` class declarations to Python 3.7 `dataclasses` using the `@dataclass(frozen=True)` decorator. 
[#299](https://github.com/Instagram/LibCST/pull/299) ## Fixed - - Fixed typo in file name `libcst/codemod/commands/convert_percent_format_to_fstring.py`. [#301](https://github.com/Instagram/LibCST/pull/301) - - Fixed `StopIteration` exception during scope analysis matching on import names. [#302](https://github.com/Instagram/LibCST/pull/302) + +- Fixed typo in file name `libcst/codemod/commands/convert_percent_format_to_fstring.py`. [#301](https://github.com/Instagram/LibCST/pull/301) +- Fixed `StopIteration` exception during scope analysis matching on import names. [#302](https://github.com/Instagram/LibCST/pull/302) # 0.3.5 - 2020-05-12 ## Updated - - Expose more granular `Assignments` and `Accesses` for dotted imports in `ScopeProvider`. [#284](https://github.com/Instagram/LibCST/pull/284) - - `get_qualified_names_for` returns the most appropriate qualified name. [#290](https://github.com/Instagram/LibCST/pull/290) - - Surface `SyntaxError` raised by formatter in codemod run. [#288](https://github.com/Instagram/LibCST/pull/288) [#289](https://github.com/Instagram/LibCST/pull/289) - - Rename `ApplyTypeAnnotationsVisitor.add_stub_to_context` as `ApplyTypeAnnotationsVisitor.store_stub_in_context` and add `overwrite_existing_annotations` to allow overwrite existing type annotations. [#289](https://github.com/Instagram/LibCST/pull/291) + +- Expose more granular `Assignments` and `Accesses` for dotted imports in `ScopeProvider`. [#284](https://github.com/Instagram/LibCST/pull/284) +- `get_qualified_names_for` returns the most appropriate qualified name. [#290](https://github.com/Instagram/LibCST/pull/290) +- Surface `SyntaxError` raised by formatter in codemod run. [#288](https://github.com/Instagram/LibCST/pull/288) [#289](https://github.com/Instagram/LibCST/pull/289) +- Rename `ApplyTypeAnnotationsVisitor.add_stub_to_context` as `ApplyTypeAnnotationsVisitor.store_stub_in_context` and add `overwrite_existing_annotations` to allow overwrite existing type annotations. 
[#289](https://github.com/Instagram/LibCST/pull/291) ## Fixed - - Close opened file handles on finishing codemod to avoid `Too many open files` on OSX. [#283](https://github.com/Instagram/LibCST/pull/283) + +- Close opened file handles on finishing codemod to avoid `Too many open files` on OSX. [#283](https://github.com/Instagram/LibCST/pull/283) ## Deprecated - - `ApplyTypeAnnotationsVisitor.add_stub_to_context` is renamed as `ApplyTypeAnnotationsVisitor.store_stub_in_context`. + +- `ApplyTypeAnnotationsVisitor.add_stub_to_context` is renamed as `ApplyTypeAnnotationsVisitor.store_stub_in_context`. # 0.3.4 - 2020-03-27 ## Added - - Supported CST parsing for Python 3.0, 3.1 and 3.3. [#261](https://github.com/Instagram/LibCST/pull/261) - - Added `RemoveUnusedImportsCommand` for removing unused import codemod. [#266](https://github.com/Instagram/LibCST/pull/266) - - Added `ApplyTypeAnnotationsVisitor.add_stub_to_context` for apply type annotations from stub modules. [#265](https://github.com/Instagram/LibCST/pull/265) + +- Supported CST parsing for Python 3.0, 3.1 and 3.3. [#261](https://github.com/Instagram/LibCST/pull/261) +- Added `RemoveUnusedImportsCommand` for removing unused import codemod. [#266](https://github.com/Instagram/LibCST/pull/266) +- Added `ApplyTypeAnnotationsVisitor.add_stub_to_context` for apply type annotations from stub modules. [#265](https://github.com/Instagram/LibCST/pull/265) ## Updated - - Improved exception message of `get_metadata` when MetadataWrapper is not used. [#257](https://github.com/Instagram/LibCST/pull/257) - - New steps for Pyre type check in README.rst which analyzes installed Python sources for better type checking. [#262](https://github.com/Instagram/LibCST/pull/262) + +- Improved exception message of `get_metadata` when MetadataWrapper is not used. [#257](https://github.com/Instagram/LibCST/pull/257) +- New steps for Pyre type check in README.rst which analyzes installed Python sources for better type checking. 
[#262](https://github.com/Instagram/LibCST/pull/262) ## Fixed - - Parsed `except(Exception):` correctly while there is no space after except syntax. [#256](https://github.com/Instagram/LibCST/pull/256) - - Fixed `RemoveImportsVisitor` to not remove imports when references still exist. [#264](https://github.com/Instagram/LibCST/pull/264) - - Fixed missing type annotations. [#271](https://github.com/Instagram/LibCST/pull/271) - - `AddImportsVisitor` generates deterministic order for added imports. [#274](https://github.com/Instagram/LibCST/pull/274) + +- Parsed `except(Exception):` correctly while there is no space after except syntax. [#256](https://github.com/Instagram/LibCST/pull/256) +- Fixed `RemoveImportsVisitor` to not remove imports when references still exist. [#264](https://github.com/Instagram/LibCST/pull/264) +- Fixed missing type annotations. [#271](https://github.com/Instagram/LibCST/pull/271) +- `AddImportsVisitor` generates deterministic order for added imports. [#274](https://github.com/Instagram/LibCST/pull/274) # 0.3.3 - 2020-03-05 ## Added - - `ByteSpanPositionProvider` provides start offset and length of CSTNode as metadata. - - `get_docstring` helper provides docstring from `Module`, `ClassDef` and `FunctionDef` node types. + +- `ByteSpanPositionProvider` provides start offset and length of CSTNode as metadata. +- `get_docstring` helper provides docstring from `Module`, `ClassDef` and `FunctionDef` node types. ## Updated - - Optimized `ScopeProvider` performance to run faster and use less memory: - - remove unnecessary `Assignment` of keyword `Arg`. - - don't provide scope object for formatting information nodes. - - batch set union updates in `infer_accesses` step. + +- Optimized `ScopeProvider` performance to run faster and use less memory: + - remove unnecessary `Assignment` of keyword `Arg`. + - don't provide scope object for formatting information nodes. + - batch set union updates in `infer_accesses` step. 
## Fixed - - Fixed `_assignments` mutation when calling read-only `Scope.get_qualified_names_for` and `__contains__`. + +- Fixed `_assignments` mutation when calling read-only `Scope.get_qualified_names_for` and `__contains__`. # 0.3.2 - 2020-02-24 ## Added - - Added `RemoveImportsVisitor` to remove an import if it's not used in a module. - - Added `GatherExportsVisitor` to gather exports specified in `__all__`. - - Added property helpers `evaluated_name` and `evaluated_name` in `ImportAlias`. - - Added helper to get full module name: `get_absolute_module_for_import` and `get_absolute_module_for_import_or_raise`. - - Added `CodemodContext.full_module_name` for full dotted module name. - - Added format specifiers f-string conversion support to `ConvertFormatStringCommand`. + +- Added `RemoveImportsVisitor` to remove an import if it's not used in a module. +- Added `GatherExportsVisitor` to gather exports specified in `__all__`. +- Added property helpers `evaluated_name` and `evaluated_alias` in `ImportAlias`. +- Added helper to get full module name: `get_absolute_module_for_import` and `get_absolute_module_for_import_or_raise`. +- Added `CodemodContext.full_module_name` for full dotted module name. +- Added format specifiers f-string conversion support to `ConvertFormatStringCommand`. ## Updated - - Moved LibCST version to `_version.py` and can print it by `python -m libcst.tool --version`. - - Improved `EnsureImportPresentCommand` with `--alias` option. - - Improved `ConvertFormatStringCommand` with `--allow-strip-comments` and `--allow-await` options. + +- Moved LibCST version to `_version.py` and can print it by `python -m libcst.tool --version`. +- Improved `EnsureImportPresentCommand` with `--alias` option. +- Improved `ConvertFormatStringCommand` with `--allow-strip-comments` and `--allow-await` options. # 0.3.1 - 2020-02-06 ## Added - - Added helpers to get both the raw and evaluated value of a SimpleString.
- - Added helpers to get the quoting and prefix of SimpleString and FormattedString. - - Added a helper to get the evaluated value of number types. - - Added templated parsers for statement/expression/module to make constructing updated nodes in transforms easier. - - FullRepoManager is now integrated into codemods, so metadata requiring full repo analysis can now be used. - - Added `get_full_name_for_node_or_raise` helper to remove boilerplate of checking against `None`. +- Added helpers to get both the raw and evaluated value of a SimpleString. +- Added helpers to get the quoting and prefix of SimpleString and FormattedString. +- Added a helper to get the evaluated value of number types. +- Added templated parsers for statement/expression/module to make constructing updated nodes in transforms easier. +- FullRepoManager is now integrated into codemods, so metadata requiring full repo analysis can now be used. +- Added `get_full_name_for_node_or_raise` helper to remove boilerplate of checking against `None`. ## Updated - - Upgraded Pyre dependency to 0.0.41. - - Added additional status to `libcst codemod` command. - - `get_full_name_for_node` now supports decorators. +- Upgraded Pyre dependency to 0.0.41. +- Added additional status to `libcst codemod` command. +- `get_full_name_for_node` now supports decorators. ## Fixed - - Clarified documentation around f-strings, fixed indentation. - - Fixed `libcst list` crashing if a codemod does unsafe work on import. - - Fixed deploy-time dependencies so pyyaml won't have to be manually installed to execute codemods. - - QualifiedNameProvider no longer erroneously claims names inside attributes are built-ins. +- Clarified documentation around f-strings, fixed indentation. +- Fixed `libcst list` crashing if a codemod does unsafe work on import. +- Fixed deploy-time dependencies so pyyaml won't have to be manually installed to execute codemods. 
+- QualifiedNameProvider no longer erroneously claims names inside attributes are built-ins. # 0.3.0 - 2020-01-16 ## Added - - Added support for parsing and rendering Python 3.8 source code. - - Added more documentation for codemods. - - Added `get_full_name_for_expression` helper method. - - Added `has_name` helper to `QualifiedNameProvider`. - - Added a `--python-version` flag to `libcst.tool print` utility. +- Added support for parsing and rendering Python 3.8 source code. +- Added more documentation for codemods. +- Added `get_full_name_for_expression` helper method. +- Added `has_name` helper to `QualifiedNameProvider`. +- Added a `--python-version` flag to `libcst.tool print` utility. ## Updated - - Codemod command can now discover codemods in subdirectories of configured modules. - - Updgraded Pyre dependency to 0.0.39. +- Codemod command can now discover codemods in subdirectories of configured modules. +- Upgraded Pyre dependency to 0.0.39. ## Fixed - - Cleaned up some typos and formatting issues in comments and documentation. - - Cleaned up a few redundant typevars. - - Fixed callable typing in matchers implementation. - - Fixed incorrect base class references in matcher decorator attribute visitors. - - Fixed codemod test assertion failing for some whitespace edge cases. - - Fixed scope analysis to track variable usage on `del` statements. +- Cleaned up some typos and formatting issues in comments and documentation. +- Cleaned up a few redundant typevars. +- Fixed callable typing in matchers implementation. +- Fixed incorrect base class references in matcher decorator attribute visitors. +- Fixed codemod test assertion failing for some whitespace edge cases. +- Fixed scope analysis to track variable usage on `del` statements. ## Deprecated - - Deprecated exporting `ensure_type` from `libcst` in favor of `libcst.helpers`. +- Deprecated exporting `ensure_type` from `libcst` in favor of `libcst.helpers`.
## Removed - - Removed `ExtSlice` and helper code in favor of `SubscriptElement`. - - Removed `default_params` attribute on `Parameters`. - - Removed `SyntacticPositionProvider` and `BasicPositionProvider`. - - Removed `CodePosition` and `CodeRange` exports on `libcst` in favor of `libcst.metadata`. +- Removed `ExtSlice` and helper code in favor of `SubscriptElement`. +- Removed `default_params` attribute on `Parameters`. +- Removed `SyntacticPositionProvider` and `BasicPositionProvider`. +- Removed `CodePosition` and `CodeRange` exports on `libcst` in favor of `libcst.metadata`. # 0.2.7 - 2020-01-07 ## Updated - - Command-line interface now shows rough estimate of time remaining while executing a codemod. - - Add needed import now supports import aliases. +- Command-line interface now shows rough estimate of time remaining while executing a codemod. +- Add needed import now supports import aliases. # 0.2.6 - 2020-01-01 ## Added - - Added Codemod framework for running code transform over a codebase in parallel. - - Codemod for code transform logic. - - CodemodContext for preserving states across transforms. - - CodemodCommand for CLI interface. - - CodemodTest for testing codemod easily. - - yaml codemod config. - - Pre-build commands in codemod/commands/. - - Added TypeInferenceProvider for inferred type info from Pyre. A regression test suite was included. - - Added FullRepoManager for metadata inter-process cache handing. +- Added Codemod framework for running code transform over a codebase in parallel. + - Codemod for code transform logic. + - CodemodContext for preserving states across transforms. + - CodemodCommand for CLI interface. + - CodemodTest for testing codemod easily. + - yaml codemod config. + - Pre-built commands in codemod/commands/. +- Added TypeInferenceProvider for inferred type info from Pyre. A regression test suite was included. +- Added FullRepoManager for metadata inter-process cache handling. ## Fixed - - Fixed usage link in README.
- - Fixed type annotation for Mypy compatibility. +- Fixed usage link in README. +- Fixed type annotation for Mypy compatibility. ## Updated - - Upgraded Pyre to 0.0.38 +- Upgraded Pyre to 0.0.38 # 0.2.5 - 2019-12-05 ## Added - - Added `extract`, `extractall` and `replace` functions to Matchers API. +- Added `extract`, `extractall` and `replace` functions to Matchers API. ## Fixed - - Fixed length restrictions for `AllOf` and `OneOf` so that they can be used with sequence expansion operators. - - Fixed batchable visitors not calling attribute visit functions. - - Fixed typos in docstrings. - - Fixed matcher type exception not being pickleable. +- Fixed length restrictions for `AllOf` and `OneOf` so that they can be used with sequence expansion operators. +- Fixed batchable visitors not calling attribute visit functions. +- Fixed typos in docstrings. +- Fixed matcher type exception not being pickleable. ## Deprecated - - Deprecated parsing function parameters with defaults into `default_params` attribute. They can be found in the `params` attribute instead. +- Deprecated parsing function parameters with defaults into `default_params` attribute. They can be found in the `params` attribute instead. # 0.2.4 - 2019-11-13 ## Fixed - - Fixed broken types for sequence matchers. +- Fixed broken types for sequence matchers. # 0.2.3 - 2019-11-11 ## Added - - Preliminary support for 3.8 walrus operator. - - CI config and fuzz tests for 3.8. - - Experimental re-entrant codegen API. - - Added `unsafe_skip_copy` optimization to `MetadataWrapper`. - - Matchers API now includes a `findall` function. - - Matchers now have a `MatchMetadataIfTrue` special matcher. +- Preliminary support for 3.8 walrus operator. +- CI config and fuzz tests for 3.8. +- Experimental re-entrant codegen API. +- Added `unsafe_skip_copy` optimization to `MetadataWrapper`. +- Matchers API now includes a `findall` function. +- Matchers now have a `MatchMetadataIfTrue` special matcher. 
## Updated - - Updated to latest Black release. - - Better type documentation for generated matchers. +- Updated to latest Black release. +- Better type documentation for generated matchers. ## Fixed - - Clarified matchers documentation in several confusing areas. - - Drastically sped up codegen and tests. - - `QualifiedName` now supports imported attributtes. - - `ExpressionContext` properly marks loop variables as `STORE`. - - Various typos in documentation are fixed. +- Clarified matchers documentation in several confusing areas. +- Drastically sped up codegen and tests. +- `QualifiedName` now supports imported attributes. +- `ExpressionContext` properly marks loop variables as `STORE`. +- Various typos in documentation are fixed. ## Deprecated - - Deprecated `BasicPositionProvider` and `SyntacticPositionProvider` in favor of `WhitespaceInclusivePositionProvider` and `PositionProvider`. +- Deprecated `BasicPositionProvider` and `SyntacticPositionProvider` in favor of `WhitespaceInclusivePositionProvider` and `PositionProvider`. # 0.2.2 - 2019-10-24 ## Added - - Added `deep_with_changes` helper method on CSTNode. - - Added metadata support to matchers. - - Added ability to get the defining node from a `LocalScope` (`FunctionScope`, `ClassScope` or `ComprehensionScope`). +- Added `deep_with_changes` helper method on CSTNode. +- Added metadata support to matchers. +- Added ability to get the defining node from a `LocalScope` (`FunctionScope`, `ClassScope` or `ComprehensionScope`). ## Updated - - Many changes to LibCST documentation including a new best practices page and updated scope tutorial. - - Exported `CodePosition` and `CodeRange` from `libcst.metadata` instead of `libcst`.
## Fixed - - Disallowed decorating a concrete visit or leave method with `@visit` or `@leave` decorators. - - Renamed position provider classes to be more self-explanatory. - - Fixed trailing newline detection when the last character in a file was from a continuation. - - Fixed `deep_clone` potentially blowing the stack with large LibCST trees. +- Disallowed decorating a concrete visit or leave method with `@visit` or `@leave` decorators. +- Renamed position provider classes to be more self-explanatory. +- Fixed trailing newline detection when the last character in a file was from a continuation. +- Fixed `deep_clone` potentially blowing the stack with large LibCST trees. ## Deprecated - - Deprecated `ExtSlice` in favor of `SubscriptElement`. - - Deprecated parsing `Subscript` slices directly into `Index` or `Slice` nodes. +- Deprecated `ExtSlice` in favor of `SubscriptElement`. +- Deprecated parsing `Subscript` slices directly into `Index` or `Slice` nodes. # 0.2.1 - 2019-10-14 ## Added - - `Scope.assignments` and `Scope.accesses` APIs to access all references in a scope. - - Scope analysis tutorial. +- `Scope.assignments` and `Scope.accesses` APIs to access all references in a scope. +- Scope analysis tutorial. ## Updated - - Supported `<comprehension>` in `Scope.get_qualified_names_for` and `QualifiedName`. - - Enforced identity equality for matchers and immutability of non-dataclass matchers. - - Generalize codegen cleanup steps for all codegen. +- Supported `<comprehension>` in `Scope.get_qualified_names_for` and `QualifiedName`. +- Enforced identity equality for matchers and immutability of non-dataclass matchers. +- Generalize codegen cleanup steps for all codegen. ## Fixed - - Made `BatchableMetadataProvider` typing covariant over its typevar. - - Fixed LICENSE header on generated matcher file. - - Cleanup unused internal noqa and on-call specification. + +- Made `BatchableMetadataProvider` typing covariant over its typevar. +- Fixed LICENSE header on generated matcher file.
+- Cleanup unused internal noqa and on-call specification. # 0.2.0 - 2019-10-04 ## Added - - Added matchers which allow comparing LibCST trees against arbitrary patterns. - - Improved tree manipulation with `deep_remove` and `deep_replace` helper methods on CSTNode. - - Added new metadata providers: parent node and qualified name. +- Added matchers which allow comparing LibCST trees against arbitrary patterns. +- Improved tree manipulation with `deep_remove` and `deep_replace` helper methods on CSTNode. +- Added new metadata providers: parent node and qualified name. ## Updated - - Updated Pyre to latest release. - - Updated scope metadata to provide additional helpers. - - Updated preferred method of removing a node from its parent in a visitor. +- Updated Pyre to latest release. +- Updated scope metadata to provide additional helpers. +- Updated preferred method of removing a node from its parent in a visitor. ## Fixed - - Metadata classes and methods are now exported from "libcst.metadata" instead of several submodules. - - Fixed LICENSE file to explicitly reference individual files in the repo with different licenses. - - Fixed `deep_clone` to correctly clone leaf nodes. - - Fixed all parse entrypoints to always return a tree with no duplicated leaf nodes. +- Metadata classes and methods are now exported from "libcst.metadata" instead of several submodules. +- Fixed LICENSE file to explicitly reference individual files in the repo with different licenses. +- Fixed `deep_clone` to correctly clone leaf nodes. +- Fixed all parse entrypoints to always return a tree with no duplicated leaf nodes. # 0.1.3 - 2019-09-18 ## Added - - Added preliminary support for parsing Python 3.5 and Python 3.6 source. - - Added scope analysis metadata provider. - - Added mypy type support for built package. +- Added preliminary support for parsing Python 3.5 and Python 3.6 source. +- Added scope analysis metadata provider. +- Added mypy type support for built package. 
## Fixed - - Several typos in documentation are fixed. +- Several typos in documentation are fixed. # 0.1.2 - 2019-08-29 ## Added - - Added attribute visitor hooks. - - Added base visit/leave methods which can be subclassed. - - Hypothesis fuzz testing suite, courtesy of Zac Hatfield-Dodds. +- Added attribute visitor hooks. +- Added base visit/leave methods which can be subclassed. +- Hypothesis fuzz testing suite, courtesy of Zac Hatfield-Dodds. ## Fixed - - Metadata documentation is much more complete. - - Fixed several whitespace validation issues caught by Hypothesis. - - Parser syntax errors are now used inside parser. +- Metadata documentation is much more complete. +- Fixed several whitespace validation issues caught by Hypothesis. +- Parser syntax errors are now used inside parser. # 0.1.1 - 2019-08-20 ## Added -- Metadata interface is now exported. +- Metadata interface is now exported. ## Fixed -- Dependencies are now specified with minimum revisions. -- Lots of documentation fixes. +- Dependencies are now specified with minimum revisions. +- Lots of documentation fixes. # 0.1 - 2019-07-23 ## Added - - First public release of LibCST. - - Complete, fully typed syntax tree for Python 3.6. - - Full suite of tests for each defined node type. +- First public release of LibCST. +- Complete, fully typed syntax tree for Python 3.6. +- Full suite of tests for each defined node type. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 0f7ad8bf..83f431e8 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,5 +1,80 @@ # Code of Conduct -Facebook has adopted a Code of Conduct that we expect project participants to adhere to. -Please read the [full text](https://code.fb.com/codeofconduct/) -so that you can understand what actions will and will not be tolerated. 
+## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 41a47707..2e35431d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,18 +3,38 @@ We want to make contributing to this project as easy and transparent as possible. 
## Our Development Process -This github repo is the source of truth and all changes need to be reviewed in +This github repo is the source of truth and all changes need to be reviewed in pull requests. ## Pull Requests We actively welcome your pull requests. -1. Fork the repo and create your branch from `master`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes by `tox test`. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). +### Setup Your Environment + +1. Install a [Rust toolchain](https://rustup.rs) and [uv](https://docs.astral.sh/uv/) +2. Fork the repo on your side +3. Clone the repo + > git clone [your fork.git] libcst + > cd libcst +4. Sync with the main libcst version package + > git fetch --tags https://github.com/instagram/libcst +5. Setup the env + > uv sync + +You are now ready to create your own branch from main, and contribute. +Please provide tests (using unittest), and update the documentation (both docstrings +and sphinx doc), if applicable. + +### Before Submitting Your Pull Request + +1. Format your code + > uv run poe format +2. Run the type checker + > uv run poe typecheck +3. Test your changes + > uv run poe test +4. Check linters + > uv run poe lint ## Contributor License Agreement ("CLA") In order to accept your pull request, we need you to submit a CLA. You only need @@ -30,8 +50,8 @@ Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe disclosure of security bugs. In those cases, please go through the process outlined on that page and do not file a public issue. -## Coding Style -We use flake8, isort and black to enforce coding style. +## Coding Style +We use flake8 and ufmt to enforce coding style. 
## License By contributing to LibCST, you agree that your contributions will be licensed diff --git a/LICENSE b/LICENSE index 0c823502..5594616f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ All contributions towards LibCST are MIT licensed. -Some Python files have been taken from the standard library and are therefore +Some Python files have been derived from the standard library and are therefore PSF licensed. Modifications on these files are dual licensed (both MIT and PSF). These files are: @@ -8,11 +8,13 @@ PSF). These files are: - libcst/_parser/parso/utils.py - libcst/_parser/parso/pgen2/generator.py - libcst/_parser/parso/pgen2/grammar_parser.py -- libcst/_parser/parso/python/token.py +- libcst/_parser/parso/python/py_token.py - libcst/_parser/parso/python/tokenize.py - libcst/_parser/parso/tests/test_fstring.py - libcst/_parser/parso/tests/test_tokenize.py - libcst/_parser/parso/tests/test_utils.py +- native/libcst/src/tokenizer/core/mod.rs +- native/libcst/src/tokenizer/core/string_types.rs Some Python files have been taken from dataclasses and are therefore Apache licensed. Modifications on these files are licensed under Apache 2.0 license. @@ -24,7 +26,7 @@ These files are: MIT License -Copyright (c) Facebook, Inc. and its affiliates. +Copyright (c) Meta Platforms, Inc. and affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 00000000..a7d79006 --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,12 @@ +# How to make a new release + +1. Add a new entry to `CHANGELOG.md` (I normally use the [new release page](https://github.com/Instagram/LibCST/releases/new) to generate a changelog, then manually group) + 1. Follow the existing format: `Fixed`, `Added`, `Updated`, `Deprecated`, `Removed`, `New Contributors` sections, and the full changelog link at the bottom. + 1. 
Mention only user-visible changes - improvements to CI, tests, or development workflow aren't noteworthy enough + 1. Version bumps are generally not worth mentioning with some notable exceptions (like pyo3) + 1. Group related PRs into one bullet point if it makes sense +2. manually bump versions in `Cargo.toml` files in the repo +3. run `cargo update -p libcst` +4. make a new PR with the above changes, get it reviewed and landed +5. make a new release on Github, create a new tag on publish, and copy the contents of the changelog entry in there +6. after publishing, check out the repo at the new tag, and run `cd native; cargo +nightly publish -Z package-workspace -p libcst_derive -p libcst` diff --git a/MANIFEST.in b/MANIFEST.in index 4402255d..8fd03bd0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1,5 @@ -include README.rst LICENSE CODE_OF_CONDUCT.md CONTRIBUTING.md requirements.txt requirements-dev.txt docs/source/*.rst libcst/py.typed +include README.rst LICENSE CODE_OF_CONDUCT.md CONTRIBUTING.md docs/source/*.rst libcst/py.typed + +include native/Cargo.toml +recursive-include native * +recursive-exclude native/target * \ No newline at end of file diff --git a/README.rst b/README.rst index 6d477eae..aaff5f41 100644 --- a/README.rst +++ b/README.rst @@ -4,19 +4,19 @@ A Concrete Syntax Tree (CST) parser and serializer library for Python -|readthedocs-badge| |circleci-badge| |codecov-badge| |pypi-badge| |pypi-download| |notebook-badge| +|support-ukraine| |readthedocs-badge| |ci-badge| |pypi-badge| |pypi-download| |notebook-badge| |types-badge| -.. |readthedocs-badge| image:: https://readthedocs.org/projects/pip/badge/?version=latest&style=flat +.. |support-ukraine| image:: https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB + :alt: Support Ukraine - Help Provide Humanitarian Aid to Ukraine. + :target: https://opensource.fb.com/support-ukraine + +.. 
|readthedocs-badge| image:: https://readthedocs.org/projects/libcst/badge/?version=latest&style=flat :target: https://libcst.readthedocs.io/en/latest/ :alt: Documentation -.. |circleci-badge| image:: https://circleci.com/gh/Instagram/LibCST/tree/master.svg?style=shield&circle-token=f89ff46c689cf53116308db295a492d687bf5732 - :target: https://circleci.com/gh/Instagram/LibCST/tree/master - :alt: CircleCI - -.. |codecov-badge| image:: https://codecov.io/gh/Instagram/LibCST/branch/master/graph/badge.svg - :target: https://codecov.io/gh/Instagram/LibCST/branch/master - :alt: CodeCov +.. |ci-badge| image:: https://github.com/Instagram/LibCST/actions/workflows/build.yml/badge.svg + :target: https://github.com/Instagram/LibCST/actions/workflows/build.yml?query=branch%3Amain + :alt: Github Actions .. |pypi-badge| image:: https://img.shields.io/pypi/v/libcst.svg :target: https://pypi.org/project/libcst @@ -28,12 +28,16 @@ A Concrete Syntax Tree (CST) parser and serializer library for Python .. |notebook-badge| image:: 
https://img.shields.io/badge/notebook-run-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY
0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC - :target: https://mybinder.org/v2/gh/Instagram/LibCST/master?filepath=docs%2Fsource%2Ftutorial.ipynb + :target: https://mybinder.org/v2/gh/Instagram/LibCST/main?filepath=docs%2Fsource%2Ftutorial.ipynb :alt: Notebook +.. |types-badge| image:: https://img.shields.io/pypi/types/libcst + :target: https://pypi.org/project/libcst + :alt: PYPI - Types + .. intro-start -LibCST parses Python 3.0, 3.1, 3.3, 3.5, 3.6, 3.7 or 3.8 source code as a CST tree that keeps +LibCST parses Python 3.0 -> 3.14 source code as a CST tree that keeps all formatting details (comments, whitespaces, parentheses, etc). It's useful for building automated refactoring (codemod) applications and linters. @@ -52,13 +56,15 @@ You can learn more about `the value that LibCST provides motivations for the project `__ in `our documentation `__. -Try it out with `notebook examples `__. +Try it out with `notebook examples `__. Example expression:: 1 + 2 -CST representation:: +CST representation: + +.. code-block:: python BinaryOperation( left=Integer( @@ -121,7 +127,7 @@ For a more detailed usage example, `see our documentation Installation ------------ -LibCST requires Python 3.6+ and can be easily installed using most common Python +LibCST requires Python 3.9+ and can be easily installed using most common Python packaging tools. We recommend installing the latest stable release from `PyPI `_ with pip: @@ -129,6 +135,11 @@ packaging tools. 
We recommend installing the latest stable release from pip install libcst +For parsing, LibCST ships with a native extension, so releases are distributed as binary +wheels as well as the source code. If a binary wheel is not available for your system +(Linux/Windows x86/x64 and Mac x64/arm are covered), you'll need a recent +`Rust toolchain `_ for installing. + Further Reading --------------- - `Static Analysis at Scale: An Instagram Story. `_ @@ -137,67 +148,49 @@ Further Reading Development ----------- -Start by setting up and activating a virtualenv: +See `CONTRIBUTING.md `_ for more details. + +Building +~~~~~~~~ + +In order to build LibCST, which includes a native parser module, you +will need to have the Rust build tool ``cargo`` on your path. You can +usually install ``cargo`` using your system package manager, but the +most popular way to install cargo is using +`rustup `_. + +To build just the native parser, do the following from the ``native`` +directory: .. code-block:: shell - git clone git@github.com:Instagram/LibCST.git libcst - cd libcst - python3 -m venv ../libcst-env/ # just an example, put this wherever you want - source ../libcst-env/bin/activate - pip install --upgrade pip # optional, if you have an old system version of pip - pip install -r requirements.txt -r requirements-dev.txt - # If you're done with the virtualenv, you can leave it by running: - deactivate + cargo build -We use `isort `_ and `black `_ -to format code. To format changes to be conformant, run the following in the root: +The ``libcst.native`` module should be rebuilt automatically, but to force it: .. code-block:: shell - tox -e autofix + uv sync --reinstall-package libcst -To run all tests, you'll need to install `tox `_ -and do the following in the root: - -.. code-block:: shell - - tox -e py37 - -You can also run individual tests by using unittest and specifying a module like -this: - -.. 
code-block:: shell - - python -m unittest libcst.tests.test_batched_visitor - -See the `unittest documentation `_ -for more examples of how to run tests. +Type Checking +~~~~~~~~~~~~~ We use `Pyre `_ for type-checking. -To set up pyre check environment: - -1. Copy the example Pyre config: ``cp .pyre_configuration.example .pyre_configuration``. -2. In the config file, add your venv site-packages dir to "search_path". (e.g. add "/workspace/libcst-env/lib/python3.7/site-packages") -3. Remove installed LibCST and install from the source code: - -.. code-block:: shell - - pip uninstall -y libcst - pip install -e . - To verify types for the library, do the following in the root: .. code-block:: shell - pyre check + uv run poe typecheck + +Generating Documents +~~~~~~~~~~~~~~~~~~~~ To generate documents, do the following in the root: .. code-block:: shell - tox -e docs + uv run --group docs poe docs Future ====== diff --git a/apt.txt b/apt.txt new file mode 100644 index 00000000..a3e85e90 --- /dev/null +++ b/apt.txt @@ -0,0 +1,2 @@ +rustc +cargo \ No newline at end of file diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index 32f5ee69..00000000 --- a/codecov.yml +++ /dev/null @@ -1,4 +0,0 @@ -coverage: - status: - project: no - patch: yes diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css index 80660072..5b756e8a 100644 --- a/docs/source/_static/custom.css +++ b/docs/source/_static/custom.css @@ -1,5 +1,5 @@ /** - * Copyright (c) Facebook, Inc. and its affiliates. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. 
diff --git a/docs/source/_static/img/python_scopes.png b/docs/source/_static/img/python_scopes.png index d5ad158f..0c1b0266 100644 Binary files a/docs/source/_static/img/python_scopes.png and b/docs/source/_static/img/python_scopes.png differ diff --git a/docs/source/_static/img/python_scopes.svg b/docs/source/_static/img/python_scopes.svg index 5865e27d..a1c04911 100644 --- a/docs/source/_static/img/python_scopes.svg +++ b/docs/source/_static/img/python_scopes.svg @@ -1,6 +1,4 @@ - - + inkscape:version="1.0.2 (e86c8708, 2021-01-15)" + sodipodi:docname="drawing.svg" + inkscape:export-filename="/Users/lpetre/Desktop/rect846-0.png" + inkscape:export-xdpi="191.53999" + inkscape:export-ydpi="191.53999"> - - - + @@ -63,223 +58,440 @@ - - - - - - - global scope - - - - class scope - - - - function scope - - - - - comprehension scope - - - - ITERATIONS = 10class Cls: class_attribute = 20 def fn(): for i in range(ITERATIONS): ... return [ i for i in range(10) ]Cls().fn() + id="layer1"> + + + + + builtin scope + + + + class range(stop) ... + + + + + + + global scope + + + + ITERATIONS = 10Cls().fn() + + + + + + + class scope + + + + class Cls: class_attribute = 20 + + + + + + + function scope + + + + def fn(): for i in range(ITERATIONS): ... + + + + + + + comprehension scope + + + + return [ i for i in range(10) ] + + diff --git a/docs/source/codemods_tutorial.rst b/docs/source/codemods_tutorial.rst index 78365ca9..6f657fbc 100644 --- a/docs/source/codemods_tutorial.rst +++ b/docs/source/codemods_tutorial.rst @@ -26,7 +26,7 @@ then edit the produced ``.libcst.codemod.yaml`` file:: python3 -m libcst.tool initialize . The file includes provisions for customizing any generated code marker, calling an -external code formatter such as `black `_, blackisting +external code formatter such as `black `_, blacklisting patterns of files you never wish to touch and a list of modules that contain valid codemods that can be executed. 
If you want to write and run codemods specific to your repository or organization, you can add an in-repo module location to the list of @@ -135,16 +135,18 @@ replaces any string which matches our string command-line argument with a consta It also takes care of adding the import required for the constant to be defined properly. Cool! Let's look at the command-line help for this codemod. Let's assume you saved it -as ``constant_folding.py`` inside ``libcst.codemod.commands``. You can get help for the +as ``constant_folding.py``. You can get help for the codemod by running the following command:: - python3 -m libcst.tool codemod constant_folding.ConvertConstantCommand --help + python3 -m libcst.tool codemod -x constant_folding.ConvertConstantCommand --help Notice that along with the default arguments, the ``--string`` and ``--constant`` arguments are present in the help, and the command-line description has been updated with the codemod's description string. You'll notice that the codemod also shows up on ``libcst.tool list``. +And ``-x`` flag allows to load any module as a codemod in addition to the standard ones. + ---------------- Testing Codemods ---------------- diff --git a/docs/source/conf.py b/docs/source/conf.py index 3eaff6a0..d3311e90 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -26,7 +26,7 @@ # -- Project information ----------------------------------------------------- project = "LibCST" -copyright = "2019, Facebook" +copyright = "Meta Platforms, Inc. and affiliates" author = "Benjamin Woodruff, Jennifer Taylor, Carl Meyer, Jimmy Lai, Ray Zeng" # The short X.Y version @@ -71,7 +71,7 @@ master_doc = "index" # # This is also used if you do content translation via gettext catalogs. 
# Usually you set "language" from the command line for these cases. -language = None +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -196,6 +196,7 @@ intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True + # -- autodoc customization def strip_class_signature(app, what, name, obj, options, signature, return_annotation): if what == "class": @@ -218,7 +219,7 @@ def setup(app): nbsphinx_prolog = r""" -{% set docname = 'docs/source/' + env.doc2path(env.docname, base=None) %} +{% set docname = 'docs/source/' + env.doc2path(env.docname, base=None)|string%} .. only:: html @@ -227,6 +228,6 @@ nbsphinx_prolog = r""" Interactive online tutorial: |notebook-badge| .. |notebook-badge| image:: https://img.shields.io/badge/notebook-run-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXu
GN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC - :target: https://mybinder.org/v2/gh/Instagram/LibCST/master?filepath={{ docname }} + :target: https://mybinder.org/v2/gh/Instagram/LibCST/main?filepath={{ docname }} :alt: Notebook """ diff --git a/docs/source/helpers.rst b/docs/source/helpers.rst index e4b94d2b..3cf5abfb 100644 --- a/docs/source/helpers.rst +++ b/docs/source/helpers.rst @@ -32,3 +32,18 @@ Functions that assist in traversing an existing LibCST tree. .. autofunction:: libcst.helpers.get_full_name_for_node .. autofunction:: libcst.helpers.get_full_name_for_node_or_raise .. 
autofunction:: libcst.helpers.ensure_type + +Node fields filtering Helpers +----------------------------- + +Function that assist when handling CST nodes' fields. + +.. autofunction:: libcst.helpers.filter_node_fields + +And lower level functions: + +.. autofunction:: libcst.helpers.get_node_fields +.. autofunction:: libcst.helpers.is_whitespace_node_field +.. autofunction:: libcst.helpers.is_syntax_node_field +.. autofunction:: libcst.helpers.is_default_node_field +.. autofunction:: libcst.helpers.get_field_default_value diff --git a/docs/source/matchers.rst b/docs/source/matchers.rst index eac6faa9..a89a1a68 100644 --- a/docs/source/matchers.rst +++ b/docs/source/matchers.rst @@ -5,7 +5,7 @@ Matchers ======== Matchers are provided as a way of asking whether a particular LibCST node and its -children match the a particular shape. It is possible to write a visitor that +children match a particular shape. It is possible to write a visitor that tracks attributes using ``visit_`` methods. It is also possible to implement manual instance checking and traversal of a node's children. However, both are cumbersome to write and hard to understand. Matchers offer a more concise way of @@ -13,7 +13,7 @@ defining what attributes on a node matter when matching against predefined patte To accomplish this, a matcher has been created which corresponds to each LibCST node documented in :ref:`libcst-nodes`. Matchers default each of their attributes -to the special sentinal matcher :func:`~libcst.matchers.DoNotCare`. When constructing +to the special sentinel matcher :func:`~libcst.matchers.DoNotCare`. When constructing a matcher, you can initialize the node with only the values of attributes that you are concerned with, leaving the rest of the attributes set to :func:`~libcst.matchers.DoNotCare` in order to skip comparing against them. 
@@ -79,7 +79,7 @@ Traversal Order ^^^^^^^^^^^^^^^ Visit and leave functions created using :func:`~libcst.matchers.visit` or -:func:`~libcst.matchers.leave` follow the traveral order rules laid out in +:func:`~libcst.matchers.leave` follow the traversal order rules laid out in LibCST's visitor :ref:`libcst-visitor-traversal` with one additional rule. Any visit function created using the :func:`~libcst.matchers.visit` decorator will be called **before** a ``visit_`` function if it is defined for your visitor. diff --git a/docs/source/matchers_tutorial.ipynb b/docs/source/matchers_tutorial.ipynb index 1add7bca..6222bf7f 100644 --- a/docs/source/matchers_tutorial.ipynb +++ b/docs/source/matchers_tutorial.ipynb @@ -234,7 +234,7 @@ "into your :ref:`libcst-visitors` in order to identify which nodes you care ", "about. Matcher :ref:`libcst-matcher-decorators` help reduce that boilerplate.\n", "\n", - "Say you wanted to invert the the boolean literals in functions which ", + "Say you wanted to invert the boolean literals in functions which ", "match the above ``best_is_call_with_booleans``. You could build something ", "that looks like the following:" ] diff --git a/docs/source/metadata.rst b/docs/source/metadata.rst index f6c9c078..9e450c97 100644 --- a/docs/source/metadata.rst +++ b/docs/source/metadata.rst @@ -18,10 +18,10 @@ numbers of nodes through the :class:`~libcst.metadata.PositionProvider`: .. code-block:: python class NamePrinter(cst.CSTVisitor): - METADATA_DEPENDENCIES = (cst.PositionProvider,) + METADATA_DEPENDENCIES = (cst.metadata.PositionProvider,) def visit_Name(self, node: cst.Name) -> None: - pos = self.get_metadata(cst.PositionProvider, node).start + pos = self.get_metadata(cst.metadata.PositionProvider, node).start print(f"{node.value} found at line {pos.line}, column {pos.column}") wrapper = cst.metadata.MetadataWrapper(cst.parse_module("x = 1")) @@ -40,8 +40,8 @@ The wrapper provides a :func:`~libcst.metadata.MetadataWrapper.resolve` function .. 
autoclass:: libcst.metadata.MetadataWrapper :special-members: __init__ -If you're working with visitors, which extend :class:`~libcst.MetadataDependent`, -metadata dependencies will be automatically computed when visited by a +If you're working with visitors, which extend :class:`~libcst.MetadataDependent`, +metadata dependencies will be automatically computed when visited by a :class:`~libcst.metadata.MetadataWrapper` and are accessible through :func:`~libcst.MetadataDependent.get_metadata` @@ -94,7 +94,7 @@ declaring one of :class:`~libcst.metadata.PositionProvider` or most cases, :class:`~libcst.metadata.PositionProvider` is what you probably want. -Node positions are is represented with :class:`~libcst.metadata.CodeRange` +Node positions are represented with :class:`~libcst.metadata.CodeRange` objects. See :ref:`the above example`. .. autoclass:: libcst.metadata.PositionProvider @@ -134,14 +134,15 @@ New scopes are created for classes, functions, and comprehensions. Other block constructs like conditional statements, loops, and try…except don't create their own scope. -There are four different type of scope in Python: +There are five different types of scopes in Python: +:class:`~libcst.metadata.BuiltinScope`, :class:`~libcst.metadata.GlobalScope`, :class:`~libcst.metadata.ClassScope`, :class:`~libcst.metadata.FunctionScope`, and :class:`~libcst.metadata.ComprehensionScope`. .. image:: _static/img/python_scopes.png - :alt: Diagram showing how the above 4 scopes are nested in each other + :alt: Diagram showing how the above 5 scopes are nested in each other :width: 400 :align: center @@ -175,6 +176,9 @@ assigned or accessed within. :no-undoc-members: :special-members: __contains__, __getitem__, __iter__ +.. autoclass:: libcst.metadata.BuiltinScope + :no-undoc-members: + .. 
autoclass:: libcst.metadata.GlobalScope :no-undoc-members: @@ -199,10 +203,18 @@ We don't call it `fully qualified name `__ is to automatically infer @@ -222,8 +242,8 @@ In Python, type checkers like `Mypy `_ or `Pyre `__ analyze `type annotations `__ and infer types for expressions. :class:`~libcst.metadata.TypeInferenceProvider` is provided by `Pyre Query API `__ -which requires `setup watchman `_ for incremental typechecking. -:class:`~libcst.metadata.FullRepoManger` is built for manage the inter process communication to Pyre. +which requires `setup watchman `_ for incremental typechecking. +:class:`~libcst.metadata.FullRepoManager` is built for manage the inter process communication to Pyre. .. autoclass:: libcst.metadata.TypeInferenceProvider :no-undoc-members: diff --git a/docs/source/scope_tutorial.ipynb b/docs/source/scope_tutorial.ipynb index e4d4393d..179e2ed7 100644 --- a/docs/source/scope_tutorial.ipynb +++ b/docs/source/scope_tutorial.ipynb @@ -90,7 +90,7 @@ "source": [ "Warn on unused imports and undefined references\n", "===============================================\n", - "To find all unused imports, we iterate through :attr:`~libcst.metadata.Scope.assignments` and an assignment is unused when its :attr:`~libcst.metadata.BaseAssignment.references` is empty. To find all undefined references, we iterate through :attr:`~libcst.metadata.Scope.accesses` (we focus on :class:`~libcst.Import`/:class:`~libcst.ImportFrom` assignments) and an access is undefined reference when its :attr:`~libcst.metadata.Access.referents` is empty. When reporting the warning to developer, we'll want to report the line number and column offset along with the suggestion to make it more clear. 
We can get position information from :class:`~libcst.metadata.PositionProvider` and print the warnings as follows.\n" + "To find all unused imports, we iterate through :attr:`~libcst.metadata.Scope.assignments` and an assignment is unused when its :attr:`~libcst.metadata.BaseAssignment.references` is empty. To find all undefined references, we iterate through :attr:`~libcst.metadata.Scope.accesses` (we focus on :class:`~libcst.Import`/:class:`~libcst.ImportFrom` assignments) and an access is undefined reference when its :attr:`~libcst.metadata.Access.referents` is empty. When reporting the warning to the developer, we'll want to report the line number and column offset along with the suggestion to make it more clear. We can get position information from :class:`~libcst.metadata.PositionProvider` and print the warnings as follows.\n" ] }, { @@ -136,13 +136,13 @@ "Automatically Remove Unused Import\n", "==================================\n", "Unused import is a commmon code suggestion provided by lint tool like `flake8 F401 `_ ``imported but unused``.\n", - "Even though reporting unused import is already useful, with LibCST we can provide automatic fix to remove unused import. That can make the suggestion more actionable and save developer's time.\n", + "Even though reporting unused imports is already useful, with LibCST we can provide an automatic fix to remove unused imports. That can make the suggestion more actionable and save developer's time.\n", "\n", "An import statement may import multiple names, we want to remove those unused names from the import statement. If all the names in the import statement are not used, we remove the entire import.\n", "To remove the unused name, we implement ``RemoveUnusedImportTransformer`` by subclassing :class:`~libcst.CSTTransformer`. 
We overwrite ``leave_Import`` and ``leave_ImportFrom`` to modify the import statements.\n", - "When we find the import node in lookup table, we iterate through all ``names`` and keep used names in ``names_to_keep``.\n", + "When we find the import node in the lookup table, we iterate through all ``names`` and keep used names in ``names_to_keep``.\n", "If ``names_to_keep`` is empty, all names are unused and we remove the entire import node.\n", - "Otherwise, we update the import node and just removing partial names." + "Otherwise, we update the import node and just remove partial names." ] }, { @@ -195,7 +195,7 @@ "raw_mimetype": "text/restructuredtext" }, "source": [ - "After the transform, we use ``.code`` to generate fixed code and all unused names are fixed as expected! The difflib is used to show only changed part and only import lines are updated as expected." + "After the transform, we use ``.code`` to generate the fixed code and all unused names are fixed as expected! The difflib is used to show only the changed part and only imported lines are updated as expected." ] }, { diff --git a/docs/source/tutorial.ipynb b/docs/source/tutorial.ipynb index 1fe57070..1b1ad00d 100644 --- a/docs/source/tutorial.ipynb +++ b/docs/source/tutorial.ipynb @@ -1,24 +1,25 @@ { "cells": [ { - "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, + "cell_type": "raw", "source": [ "====================\n", "Parsing and Visiting\n", "====================\n", "\n", - "LibCST provides helpers to parse source code string as concrete syntax tree. In order to perform static analysis to identify patterns in the tree or modify the tree programmatically, we can use visitor pattern to traverse the tree. In this tutorial, we demonstrate a common three-step-workflow to build an automated refactoring (codemod) application:\n", + "LibCST provides helpers to parse source code string as a concrete syntax tree. 
In order to perform static analysis to identify patterns in the tree or modify the tree programmatically, we can use the visitor pattern to traverse the tree. In this tutorial, we demonstrate a common four-step-workflow to build an automated refactoring (codemod) application:\n", "\n", "1. `Parse Source Code <#Parse-Source-Code>`_\n", - "2. `Build Visitor or Transformer <#Build-Visitor-or-Transformer>`_\n", - "3. `Generate Source Code <#Generate-Source-Code>`_\n", + "2. `Display The Source Code CST <#Display-Source-Code-CST>`_\n", + "3. `Build Visitor or Transformer <#Build-Visitor-or-Transformer>`_\n", + "4. `Generate Source Code <#Generate-Source-Code>`_\n", "\n", "Parse Source Code\n", "=================\n", - "LibCST provides various helpers to parse source code as concrete syntax tree: :func:`~libcst.parse_module`, :func:`~libcst.parse_expression` and :func:`~libcst.parse_statement` (see :doc:`Parsing ` for more detail). The default :class:`~libcst.CSTNode` repr provides pretty print formatting for reading the tree easily." + "LibCST provides various helpers to parse source code as a concrete syntax tree: :func:`~libcst.parse_module`, :func:`~libcst.parse_expression` and :func:`~libcst.parse_statement` (see :doc:`Parsing ` for more detail)." ] }, { @@ -41,7 +42,42 @@ "source": [ "import libcst as cst\n", "\n", - "cst.parse_expression(\"1 + 2\")" + "source_tree = cst.parse_expression(\"1 + 2\")" + ] + }, + { + "metadata": { + "raw_mimetype": "text/restructuredtext" + }, + "cell_type": "raw", + "source": [ + "|\n", + "Display Source Code CST\n", + "=======================\n", + "The default :class:`~libcst.CSTNode` repr provides pretty print formatting for displaying the entire CST tree." + ] + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": "print(source_tree)" + }, + { + "metadata": {}, + "cell_type": "raw", + "source": "The entire CST tree may be overwhelming at times. 
To only focus on essential elements of the CST tree, LibCST provides the ``dump`` helper." + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": [ + "from libcst.display import dump\n", + "\n", + "print(dump(source_tree))" ] }, { @@ -50,9 +86,11 @@ "raw_mimetype": "text/restructuredtext" }, "source": [ + " \n", + "|\n", "Example: add typing annotation from pyi stub file to Python source\n", "------------------------------------------------------------------\n", - "Python `typing annotation `_ was added in Python 3.5. Some Python applications add typing annotations in separate ``pyi`` stub files in order to support old Python versions. When applications decide to stop supporting old Python versions, they'll want to automatically copy the type annotation from a pyi file to a source file. Here we demonstrate how to do that easliy using LibCST. The first step is to parse the pyi stub and source files as trees." + "Python `typing annotation `_ was added in Python 3.5. Some Python applications add typing annotations in separate ``pyi`` stub files in order to support old Python versions. When applications decide to stop supporting old Python versions, they'll want to automatically copy the type annotation from a pyi file to a source file. Here we demonstrate how to do that easily using LibCST. The first step is to parse the pyi stub and source files as trees." 
] }, { @@ -68,7 +106,7 @@ " self._replace(type=self.type.name))\n", "\n", "def tokenize(code, version_info, start_pos=(1, 0)):\n", - " \"\"\"Generate tokens from a the source code (string).\"\"\"\n", + " \"\"\"Generate tokens from the source code (string).\"\"\"\n", " lines = split_lines(code, keepends=True)\n", " return tokenize_lines(lines, version_info, start_pos=start_pos)\n", "'''\n", @@ -92,10 +130,11 @@ "raw_mimetype": "text/restructuredtext" }, "source": [ + "|\n", "Build Visitor or Transformer\n", "============================\n", "For traversing and modifying the tree, LibCST provides Visitor and Transformer classes similar to the `ast module `_. To implement a visitor (read only) or transformer (read/write), simply implement a subclass of :class:`~libcst.CSTVisitor` or :class:`~libcst.CSTTransformer` (see :doc:`Visitors ` for more detail).\n", - "In the typing example, we need to implement a visitor to collect typing annotation from the stub tree and a transformer to copy the annotation to the function signature. In the visitor, we implement ``visit_FunctionDef`` to collect annotations. Later in the transformer, we implement ``leave_FunctionDef`` to add the collected annotations." + "In the typing example, we need to implement a visitor to collect typing annotations from the stub tree and a transformer to copy the annotation to the function signature. In the visitor, we implement ``visit_FunctionDef`` to collect annotations. Later in the transformer, we implement ``leave_FunctionDef`` to add the collected annotations." 
] }, { @@ -113,7 +152,7 @@ " self.stack: List[Tuple[str, ...]] = []\n", " # store the annotations\n", " self.annotations: Dict[\n", - " Tuple[str, ...], # key: tuple of cononical class/function name\n", + " Tuple[str, ...], # key: tuple of canonical class/function name\n", " Tuple[cst.Parameters, Optional[cst.Annotation]], # value: (params, returns)\n", " ] = {}\n", "\n", @@ -140,7 +179,7 @@ " self.stack: List[Tuple[str, ...]] = []\n", " # store the annotations\n", " self.annotations: Dict[\n", - " Tuple[str, ...], # key: tuple of cononical class/function name\n", + " Tuple[str, ...], # key: tuple of canonical class/function name\n", " Tuple[cst.Parameters, Optional[cst.Annotation]], # value: (params, returns)\n", " ] = annotations\n", "\n", @@ -184,9 +223,10 @@ "raw_mimetype": "text/restructuredtext" }, "source": [ + "|\n", "Generate Source Code\n", "====================\n", - "Generating the source code from a cst tree is as easy as accessing the :attr:`~libcst.Module.code` attribute on :class:`~libcst.Module`. After the code generation, we often use `Black `_ and `isort `_ to reformate the code to keep a consistent coding style." + "Generating the source code from a cst tree is as easy as accessing the :attr:`~libcst.Module.code` attribute on :class:`~libcst.Module`. After the code generation, we often use `ufmt `_ to reformat the code to keep a consistent coding style." ] }, { diff --git a/docs/source/visitors.rst b/docs/source/visitors.rst index a2b9ee90..722959e1 100644 --- a/docs/source/visitors.rst +++ b/docs/source/visitors.rst @@ -7,6 +7,7 @@ Visitors .. autoclass:: libcst.CSTTransformer .. autofunction:: libcst.RemoveFromParent .. autoclass:: libcst.RemovalSentinel +.. autoclass:: libcst.FlattenSentinel Visit and Leave Helper Functions -------------------------------- diff --git a/libcst/__init__.py b/libcst/__init__.py index 39b0f6dc..0cd54d62 100644 --- a/libcst/__init__.py +++ b/libcst/__init__.py @@ -1,10 +1,11 @@ -# Copyright (c) Facebook, Inc. 
and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from libcst._batched_visitor import BatchableCSTVisitor, visit_batched -from libcst._exceptions import MetadataException, ParserSyntaxError +from libcst._exceptions import CSTLogicError, MetadataException, ParserSyntaxError +from libcst._flatten_sentinel import FlattenSentinel from libcst._maybe_sentinel import MaybeSentinel from libcst._metadata_dependent import MetadataDependent from libcst._nodes.base import CSTNode, CSTValidationError @@ -28,6 +29,7 @@ from libcst._nodes.expression import ( BaseSimpleComp, BaseSlice, BaseString, + BaseTemplatedStringContent, BinaryOperation, BooleanOperation, Call, @@ -74,6 +76,9 @@ from libcst._nodes.expression import ( StarredElement, Subscript, SubscriptElement, + TemplatedString, + TemplatedStringExpression, + TemplatedStringText, Tuple, UnaryOperation, Yield, @@ -152,6 +157,7 @@ from libcst._nodes.statement import ( Del, Else, ExceptHandler, + ExceptStarHandler, Expr, Finally, For, @@ -162,14 +168,38 @@ from libcst._nodes.statement import ( ImportAlias, ImportFrom, IndentedBlock, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchKeywordElement, + MatchList, + MatchMapping, + MatchMappingElement, + MatchOr, + MatchOrElement, + MatchPattern, + MatchSequence, + MatchSequenceElement, + MatchSingleton, + MatchStar, + MatchTuple, + MatchValue, NameItem, Nonlocal, + ParamSpec, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, + TryStar, + TypeAlias, + TypeParam, + TypeParameters, + TypeVar, + TypeVarTuple, While, With, WithItem, @@ -189,8 +219,12 @@ from libcst._parser.types.config import ( PartialParserConfig, ) from libcst._removal_sentinel import RemovalSentinel, RemoveFromParent -from libcst._version import LIBCST_VERSION from libcst._visitors import CSTNodeT, CSTTransformer, CSTVisitor, CSTVisitorT + +try: 
+ from libcst._version import version as LIBCST_VERSION +except ImportError: + LIBCST_VERSION = "unknown" from libcst.helpers import ( # from libcst import ensure_type is deprecated, will be removed in 0.4.0 ensure_type, ) @@ -201,7 +235,6 @@ from libcst.metadata.base_provider import ( ) from libcst.metadata.wrapper import MetadataWrapper - __all__ = [ "KNOWN_PYTHON_VERSION_STRINGS", "LIBCST_VERSION", @@ -211,7 +244,9 @@ __all__ = [ "CSTValidationError", "CSTVisitor", "CSTVisitorT", + "FlattenSentinel", "MaybeSentinel", + "CSTLogicError", "MetadataException", "ParserSyntaxError", "PartialParserConfig", @@ -237,6 +272,7 @@ __all__ = [ "BaseElement", "BaseExpression", "BaseFormattedStringContent", + "BaseTemplatedStringContent", "BaseList", "BaseNumber", "BaseSet", @@ -260,6 +296,9 @@ __all__ = [ "FormattedString", "FormattedStringExpression", "FormattedStringText", + "TemplatedString", + "TemplatedStringText", + "TemplatedStringExpression", "From", "GeneratorExp", "IfExp", @@ -362,6 +401,7 @@ __all__ = [ "Del", "Else", "ExceptHandler", + "ExceptStarHandler", "Expr", "Finally", "For", @@ -372,6 +412,23 @@ __all__ = [ "ImportAlias", "ImportFrom", "IndentedBlock", + "Match", + "MatchCase", + "MatchAs", + "MatchClass", + "MatchKeywordElement", + "MatchList", + "MatchMapping", + "MatchMappingElement", + "MatchOr", + "MatchOrElement", + "MatchPattern", + "MatchSequence", + "MatchSequenceElement", + "MatchSingleton", + "MatchStar", + "MatchTuple", + "MatchValue", "NameItem", "Nonlocal", "Pass", @@ -380,6 +437,7 @@ __all__ = [ "SimpleStatementLine", "SimpleStatementSuite", "Try", + "TryStar", "While", "With", "WithItem", @@ -395,4 +453,10 @@ __all__ = [ "VisitorMetadataProvider", "MetadataDependent", "MetadataWrapper", + "TypeVar", + "TypeVarTuple", + "ParamSpec", + "TypeParam", + "TypeParameters", + "TypeAlias", ] diff --git a/libcst/_add_slots.py b/libcst/_add_slots.py index 706f5d10..7012ce1a 100644 --- a/libcst/_add_slots.py +++ b/libcst/_add_slots.py @@ -1,11 +1,12 @@ 
# This file is derived from github.com/ericvsmith/dataclasses, and is Apache 2 licensed. # https://github.com/ericvsmith/dataclasses/blob/ae712dd993420d43444f188f452/LICENSE.txt # https://github.com/ericvsmith/dataclasses/blob/ae712dd993420d43444f/dataclass_tools.py +# Changed: takes slots in base classes into account when creating slots import dataclasses +from itertools import chain, filterfalse from typing import Any, Mapping, Type, TypeVar - _T = TypeVar("_T") @@ -20,7 +21,14 @@ def add_slots(cls: Type[_T]) -> Type[_T]: # Create a new dict for our new class. cls_dict = dict(cls.__dict__) field_names = tuple(f.name for f in dataclasses.fields(cls)) - cls_dict["__slots__"] = field_names + inherited_slots = set( + chain.from_iterable( + superclass.__dict__.get("__slots__", ()) for superclass in cls.mro() + ) + ) + cls_dict["__slots__"] = tuple( + filterfalse(inherited_slots.__contains__, field_names) + ) for field_name in field_names: # Remove our attributes, if present. They'll still be # available in _MARKER. @@ -30,15 +38,10 @@ def add_slots(cls: Type[_T]) -> Type[_T]: # Create the class. qualname = getattr(cls, "__qualname__", None) - try: - # GenericMeta in py3.6 requires us to track __orig_bases__. This is fixed in py3.7 - # by the removal of GenericMeta. We should just be able to use cls.__bases__ in the - # future. - bases = getattr(cls, "__orig_bases__", cls.__bases__) - cls = type(cls)(cls.__name__, bases, cls_dict) - except TypeError: - # We're in py3.7 and should use cls.__bases__ - cls = type(cls)(cls.__name__, cls.__bases__, cls_dict) + + # pyre-fixme[9]: cls has type `Type[Variable[_T]]`; used as `_T`. + # pyre-fixme[19]: Expected 0 positional arguments. 
+ cls = type(cls)(cls.__name__, cls.__bases__, cls_dict) if qualname is not None: cls.__qualname__ = qualname @@ -47,12 +50,14 @@ def add_slots(cls: Type[_T]) -> Type[_T]: def __getstate__(self: object) -> Mapping[str, Any]: return { - slot: getattr(self, slot) for slot in self.__slots__ if hasattr(self, slot) + field.name: getattr(self, field.name) + for field in dataclasses.fields(self) + if hasattr(self, field.name) } def __setstate__(self: object, state: Mapping[str, Any]) -> None: - for slot, value in state.items(): - object.__setattr__(self, slot, value) + for fieldname, value in state.items(): + object.__setattr__(self, fieldname, value) cls.__getstate__ = __getstate__ cls.__setstate__ = __setstate__ diff --git a/libcst/_batched_visitor.py b/libcst/_batched_visitor.py index 9264c4c9..d853738f 100644 --- a/libcst/_batched_visitor.py +++ b/libcst/_batched_visitor.py @@ -1,25 +1,24 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from typing import ( - TYPE_CHECKING, Callable, + cast, Iterable, List, Mapping, MutableMapping, Optional, - cast, + TYPE_CHECKING, ) from libcst._metadata_dependent import MetadataDependent from libcst._typed_visitor import CSTTypedVisitorFunctions from libcst._visitors import CSTNodeT, CSTVisitor - if TYPE_CHECKING: from libcst._nodes.base import CSTNode # noqa: F401 diff --git a/libcst/_exceptions.py b/libcst/_exceptions.py index fe41f0ee..4d3dd386 100644 --- a/libcst/_exceptions.py +++ b/libcst/_exceptions.py @@ -1,22 +1,14 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-from enum import Enum, auto -from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union +from enum import auto, Enum +from typing import Any, Callable, final, Optional, Sequence, Tuple -from typing_extensions import final - -from libcst._parser.parso.pgen2.generator import ReservedString -from libcst._parser.parso.python.token import PythonTokenTypes, TokenType -from libcst._parser.types.token import Token from libcst._tabs import expand_tabs -_EOF_STR: str = "end of file (EOF)" -_INDENT_STR: str = "an indent" -_DEDENT_STR: str = "a dedent" _NEWLINE_CHARS: str = "\r\n" @@ -24,42 +16,10 @@ class EOFSentinel(Enum): EOF = auto() -def get_expected_str( - encountered: Union[Token, EOFSentinel], - expected: Union[Iterable[Union[TokenType, ReservedString]], EOFSentinel], -) -> str: - if ( - isinstance(encountered, EOFSentinel) - or encountered.type is PythonTokenTypes.ENDMARKER - ): - encountered_str = _EOF_STR - elif encountered.type is PythonTokenTypes.INDENT: - encountered_str = _INDENT_STR - elif encountered.type is PythonTokenTypes.DEDENT: - encountered_str = _DEDENT_STR - else: - encountered_str = repr(encountered.string) +class CSTLogicError(Exception): + """General purpose internal error within LibCST itself.""" - if isinstance(expected, EOFSentinel): - expected_names = [_EOF_STR] - else: - expected_names = sorted( - [ - repr(el.name) if isinstance(el, TokenType) else repr(el.value) - for el in expected - ] - ) - - if len(expected_names) > 10: - # There's too many possibilities, so it's probably not useful to list them. - # Instead, let's just abbreviate the message. - return f"Unexpectedly encountered {encountered_str}." - else: - if len(expected_names) == 1: - expected_str = expected_names[0] - else: - expected_str = f"{', '.join(expected_names[:-1])}, or {expected_names[-1]}" - return f"Encountered {encountered_str}, but expected {expected_str}." + pass # pyre-fixme[2]: 'Any' type isn't pyre-strict. 
diff --git a/libcst/_flatten_sentinel.py b/libcst/_flatten_sentinel.py new file mode 100644 index 00000000..b41d66ef --- /dev/null +++ b/libcst/_flatten_sentinel.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import sys + +# PEP 585 +if sys.version_info < (3, 9): + from typing import Iterable, Sequence +else: + from collections.abc import Iterable, Sequence + +from libcst._types import CSTNodeT_co + + +class FlattenSentinel(Sequence[CSTNodeT_co]): + """ + A :class:`FlattenSentinel` may be returned by a :meth:`CSTTransformer.on_leave` + method when one wants to replace a node with multiple nodes. The replaced + node must be contained in a `Sequence` attribute such as + :attr:`~libcst.Module.body`. This is generally the case for + :class:`~libcst.BaseStatement` and :class:`~libcst.BaseSmallStatement`. + For example to insert a print before every return:: + + def leave_Return( + self, original_node: cst.Return, updated_node: cst.Return + ) -> Union[cst.Return, cst.RemovalSentinel, cst.FlattenSentinel[cst.BaseSmallStatement]]: + log_stmt = cst.Expr(cst.parse_expression("print('returning')")) + return cst.FlattenSentinel([log_stmt, updated_node]) + + Returning an empty :class:`FlattenSentinel` is equivalent to returning + :attr:`cst.RemovalSentinel.REMOVE` and is subject to its requirements. + """ + + nodes: Sequence[CSTNodeT_co] + + def __init__(self, nodes: Iterable[CSTNodeT_co]) -> None: + self.nodes = tuple(nodes) + + def __getitem__(self, idx: int) -> CSTNodeT_co: + return self.nodes[idx] + + def __len__(self) -> int: + return len(self.nodes) diff --git a/libcst/_maybe_sentinel.py b/libcst/_maybe_sentinel.py index dc968f95..d5eaab11 100644 --- a/libcst/_maybe_sentinel.py +++ b/libcst/_maybe_sentinel.py @@ -1,9 +1,9 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. 
and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from enum import Enum, auto +from enum import auto, Enum class MaybeSentinel(Enum): diff --git a/libcst/_metadata_dependent.py b/libcst/_metadata_dependent.py index c1627713..4faf7472 100644 --- a/libcst/_metadata_dependent.py +++ b/libcst/_metadata_dependent.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,17 +7,19 @@ import inspect from abc import ABC from contextlib import contextmanager from typing import ( - TYPE_CHECKING, + Callable, + cast, ClassVar, Collection, + Generic, Iterator, Mapping, Type, + TYPE_CHECKING, TypeVar, - cast, + Union, ) - if TYPE_CHECKING: # Circular dependency for typing reasons only from libcst._nodes.base import CSTNode # noqa: F401 @@ -30,7 +32,28 @@ if TYPE_CHECKING: _T = TypeVar("_T") -_UNDEFINED_DEFAULT = object() + +class _UNDEFINED_DEFAULT: + pass + + +class LazyValue(Generic[_T]): + """ + The class for implementing a lazy metadata loading mechanism that improves the + performance when retriving expensive metadata (e.g., qualified names). Providers + including :class:`~libcst.metadata.QualifiedNameProvider` use this class to load + the metadata of a certain node lazily when calling + :func:`~libcst.MetadataDependent.get_metadata`. 
+ """ + + def __init__(self, callable: Callable[[], _T]) -> None: + self.callable = callable + self.return_value: Union[_T, Type[_UNDEFINED_DEFAULT]] = _UNDEFINED_DEFAULT + + def __call__(self) -> _T: + if self.return_value is _UNDEFINED_DEFAULT: + self.return_value = self.callable() + return cast(_T, self.return_value) class MetadataDependent(ABC): @@ -108,6 +131,9 @@ class MetadataDependent(ABC): ) if default is not _UNDEFINED_DEFAULT: - return cast(_T, self.metadata[key].get(node, default)) + value = self.metadata[key].get(node, default) else: - return cast(_T, self.metadata[key][node]) + value = self.metadata[key][node] + if isinstance(value, LazyValue): + value = value() + return cast(_T, value) diff --git a/libcst/_nodes/__init__.py b/libcst/_nodes/__init__.py index 01f1f091..35d483bc 100644 --- a/libcst/_nodes/__init__.py +++ b/libcst/_nodes/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/base.py b/libcst/_nodes/base.py index fe2988c9..666fe311 100644 --- a/libcst/_nodes/base.py +++ b/libcst/_nodes/base.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,15 +6,16 @@ from abc import ABC, abstractmethod from copy import deepcopy from dataclasses import dataclass, field, fields, replace -from typing import Any, Dict, List, Mapping, Sequence, TypeVar, Union, cast +from typing import Any, cast, ClassVar, Dict, List, Mapping, Sequence, TypeVar, Union +from libcst import CSTLogicError +from libcst._flatten_sentinel import FlattenSentinel from libcst._nodes.internal import CodegenState from libcst._removal_sentinel import RemovalSentinel from libcst._type_enforce import is_value_of_type from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer, CSTVisitor, CSTVisitorT - _CSTNodeSelfT = TypeVar("_CSTNodeSelfT", bound="CSTNode") _EMPTY_SEQUENCE: Sequence["CSTNode"] = () @@ -109,6 +110,8 @@ def _clone(val: object) -> object: @dataclass(frozen=True) class CSTNode(ABC): + __slots__: ClassVar[Sequence[str]] = () + def __post_init__(self) -> None: # PERF: It might make more sense to move validation work into the visitor, which # would allow us to avoid validating the tree when parsing a file. @@ -207,7 +210,7 @@ class CSTNode(ABC): def visit( self: _CSTNodeSelfT, visitor: CSTVisitorT - ) -> Union[_CSTNodeSelfT, RemovalSentinel]: + ) -> Union[_CSTNodeSelfT, RemovalSentinel, FlattenSentinel[_CSTNodeSelfT]]: """ Visits the current node, its children, and all transitive children using the given visitor's callbacks. 
@@ -234,8 +237,8 @@ class CSTNode(ABC): leave_result = visitor.on_leave(self, with_updated_children) # validate return type of the user-defined `visitor.on_leave` method - if not isinstance(leave_result, (CSTNode, RemovalSentinel)): - raise Exception( + if not isinstance(leave_result, (CSTNode, RemovalSentinel, FlattenSentinel)): + raise CSTValidationError( "Expected a node of type CSTNode or a RemovalSentinel, " + f"but got a return value of {type(leave_result).__name__}" ) @@ -290,8 +293,7 @@ class CSTNode(ABC): return False @abstractmethod - def _codegen_impl(self, state: CodegenState) -> None: - ... + def _codegen_impl(self, state: CodegenState) -> None: ... def _codegen(self, state: CodegenState, **kwargs: Any) -> None: state.before_codegen(self) @@ -379,9 +381,9 @@ class CSTNode(ABC): child, all instances will be replaced. """ new_tree = self.visit(_ChildReplacementTransformer(old_node, new_node)) - if isinstance(new_tree, RemovalSentinel): - # The above transform never returns RemovalSentinel, so this isn't possible - raise Exception("Logic error, cannot get a RemovalSentinel here!") + if isinstance(new_tree, (FlattenSentinel, RemovalSentinel)): + # The above transform never returns *Sentinel, so this isn't possible + raise CSTLogicError("Logic error, cannot get a *Sentinel here!") return new_tree def deep_remove( @@ -392,10 +394,16 @@ class CSTNode(ABC): have previously modified the tree in a way that ``old_node`` appears more than once as a deep child, all instances will be removed. 
""" - return self.visit( + new_tree = self.visit( _ChildReplacementTransformer(old_node, RemovalSentinel.REMOVE) ) + if isinstance(new_tree, FlattenSentinel): + # The above transform never returns FlattenSentinel, so this isn't possible + raise CSTLogicError("Logic error, cannot get a FlattenSentinel here!") + + return new_tree + def with_deep_changes( self: _CSTNodeSelfT, old_node: "CSTNode", **changes: Any ) -> _CSTNodeSelfT: @@ -412,12 +420,12 @@ class CSTNode(ABC): similar API in the future. """ new_tree = self.visit(_ChildWithChangesTransformer(old_node, changes)) - if isinstance(new_tree, RemovalSentinel): + if isinstance(new_tree, (FlattenSentinel, RemovalSentinel)): # This is impossible with the above transform. - raise Exception("Logic error, cannot get a RemovalSentinel here!") + raise CSTLogicError("Logic error, cannot get a *Sentinel here!") return new_tree - def __eq__(self: _CSTNodeSelfT, other: _CSTNodeSelfT) -> bool: + def __eq__(self: _CSTNodeSelfT, other: object) -> bool: """ CSTNodes are only treated as equal by identity. This matches the behavior of CPython's AST nodes. @@ -462,6 +470,8 @@ class CSTNode(ABC): class BaseLeaf(CSTNode, ABC): + __slots__ = () + @property def children(self) -> Sequence[CSTNode]: # override this with an optimized implementation @@ -481,6 +491,8 @@ class BaseValueToken(BaseLeaf, ABC): into the parent CSTNode, and hard-coded into the implementation of _codegen. """ + __slots__ = () + value: str def _codegen_impl(self, state: CodegenState) -> None: diff --git a/libcst/_nodes/deep_equals.py b/libcst/_nodes/deep_equals.py index 1e18227c..de08be7f 100644 --- a/libcst/_nodes/deep_equals.py +++ b/libcst/_nodes/deep_equals.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_nodes/expression.py b/libcst/_nodes/expression.py index 25b372d1..eb95d9b3 100644 --- a/libcst/_nodes/expression.py +++ b/libcst/_nodes/expression.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,15 +9,15 @@ from abc import ABC, abstractmethod from ast import literal_eval from contextlib import contextmanager from dataclasses import dataclass, field -from enum import Enum, auto +from enum import auto, Enum from tokenize import ( Floatnumber as FLOATNUMBER_RE, Imagnumber as IMAGNUMBER_RE, Intnumber as INTNUMBER_RE, ) -from typing import Callable, Generator, Optional, Sequence, Union +from typing import Callable, Generator, Literal, Optional, Sequence, Union -from typing_extensions import Literal +from libcst import CSTLogicError from libcst._add_slots import add_slots from libcst._maybe_sentinel import MaybeSentinel @@ -222,6 +222,8 @@ class _BaseParenthesizedNode(CSTNode, ABC): this to get that functionality. """ + __slots__ = () + lpar: Sequence[LeftParen] = () # Sequence of parenthesis for precedence dictation. rpar: Sequence[RightParen] = () @@ -254,6 +256,8 @@ class BaseExpression(_BaseParenthesizedNode, ABC): An base class for all expressions. :class:`BaseExpression` contains no fields. """ + __slots__ = () + def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ Returns true if this expression is safe to be use with a word operator @@ -296,7 +300,7 @@ class BaseAssignTargetExpression(BaseExpression, ABC): `_. """ - pass + __slots__ = () class BaseDelTargetExpression(BaseExpression, ABC): @@ -316,7 +320,7 @@ class BaseDelTargetExpression(BaseExpression, ABC): `_. 
""" - pass + __slots__ = () @add_slots @@ -350,7 +354,7 @@ class Name(BaseAssignTargetExpression, BaseDelTargetExpression): if len(self.value) == 0: raise CSTValidationError("Cannot have empty name identifier.") if not self.value.isidentifier(): - raise CSTValidationError("Name is not a valid identifier.") + raise CSTValidationError(f"Name {self.value!r} is not a valid identifier.") def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): @@ -393,6 +397,8 @@ class BaseNumber(BaseExpression, ABC): used anywhere that you need to explicitly take any number type. """ + __slots__ = () + def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: """ Numbers are funny. The expression "5in [1,2,3,4,5]" is a valid expression @@ -522,13 +528,15 @@ class BaseString(BaseExpression, ABC): :class:`SimpleString`, :class:`ConcatenatedString`, and :class:`FormattedString`. """ - pass + __slots__ = () StringQuoteLiteral = Literal['"', "'", '"""', "'''"] class _BasePrefixedString(BaseString, ABC): + __slots__ = () + @property def prefix(self) -> str: """ @@ -647,14 +655,20 @@ class SimpleString(_BasePrefixedString): if len(quote) == 2: # Let's assume this is an empty string. quote = quote[:1] - elif len(quote) == 6: - # Let's assume this is an empty triple-quoted string. + elif 3 < len(quote) <= 6: + # Let's assume this can be one of the following: + # >>> """"foo""" + # '"foo' + # >>> """""bar""" + # '""bar' + # >>> """""" + # '' quote = quote[:3] if len(quote) not in {1, 3}: # We shouldn't get here due to construction validation logic, # but handle the case anyway. - raise Exception("Invalid string {self.value}") + raise CSTLogicError(f"Invalid string {self.value}") # pyre-ignore We know via the above validation that we will only # ever return one of the four string literals. 
@@ -685,7 +699,7 @@ class SimpleString(_BasePrefixedString): state.add_token(self.value) @property - def evaluated_value(self) -> str: + def evaluated_value(self) -> Union[str, bytes]: """ Return an :func:`ast.literal_eval` evaluated str of :py:attr:`value`. """ @@ -699,7 +713,7 @@ class BaseFormattedStringContent(CSTNode, ABC): sequence of :class:`BaseFormattedStringContent` parts. """ - pass + __slots__ = () @add_slots @@ -944,6 +958,253 @@ class FormattedString(_BasePrefixedString): state.add_token(self.end) +class BaseTemplatedStringContent(CSTNode, ABC): + """ + The base type for :class:`TemplatedStringText` and + :class:`TemplatedStringExpression`. A :class:`TemplatedString` is composed of a + sequence of :class:`BaseTemplatedStringContent` parts. + """ + + __slots__ = () + + +@add_slots +@dataclass(frozen=True) +class TemplatedStringText(BaseTemplatedStringContent): + """ + Part of a :class:`TemplatedString` that is not inside curly braces (``{`` or ``}``). + For example, in:: + + f"ab{cd}ef" + + ``ab`` and ``ef`` are :class:`TemplatedStringText` nodes, but ``{cd}`` is a + :class:`TemplatedStringExpression`. + """ + + #: The raw string value, including any escape characters present in the source + #: code, not including any enclosing quotes. + value: str + + def _visit_and_replace_children( + self, visitor: CSTVisitorT + ) -> "TemplatedStringText": + return TemplatedStringText(value=self.value) + + def _codegen_impl(self, state: CodegenState) -> None: + state.add_token(self.value) + + +@add_slots +@dataclass(frozen=True) +class TemplatedStringExpression(BaseTemplatedStringContent): + """ + Part of a :class:`TemplatedString` that is inside curly braces (``{`` or ``}``), + including the surrounding curly braces. For example, in:: + + f"ab{cd}ef" + + ``{cd}`` is a :class:`TemplatedStringExpression`, but ``ab`` and ``ef`` are + :class:`TemplatedStringText` nodes. 
+ + An t-string expression may contain ``conversion`` and ``format_spec`` suffixes that + control how the expression is converted to a string. + """ + + #: The expression we will evaluate and render when generating the string. + expression: BaseExpression + + #: An optional conversion specifier, such as ``!s``, ``!r`` or ``!a``. + conversion: Optional[str] = None + + #: An optional format specifier following the `format specification mini-language + #: `_. + format_spec: Optional[Sequence[BaseTemplatedStringContent]] = None + + #: Whitespace after the opening curly brace (``{``), but before the ``expression``. + whitespace_before_expression: BaseParenthesizableWhitespace = ( + SimpleWhitespace.field("") + ) + + #: Whitespace after the ``expression``, but before the ``conversion``, + #: ``format_spec`` and the closing curly brace (``}``). Python does not + #: allow whitespace inside or after a ``conversion`` or ``format_spec``. + whitespace_after_expression: BaseParenthesizableWhitespace = SimpleWhitespace.field( + "" + ) + + #: Equal sign for Templated string expression uses self-documenting expressions, + #: such as ``f"{x=}"``. See the `Python 3.8 release notes + #: `_. 
+ equal: Optional[AssignEqual] = None + + def _validate(self) -> None: + if self.conversion is not None and self.conversion not in ("s", "r", "a"): + raise CSTValidationError("Invalid t-string conversion.") + + def _visit_and_replace_children( + self, visitor: CSTVisitorT + ) -> "TemplatedStringExpression": + format_spec = self.format_spec + return TemplatedStringExpression( + whitespace_before_expression=visit_required( + self, + "whitespace_before_expression", + self.whitespace_before_expression, + visitor, + ), + expression=visit_required(self, "expression", self.expression, visitor), + equal=visit_optional(self, "equal", self.equal, visitor), + whitespace_after_expression=visit_required( + self, + "whitespace_after_expression", + self.whitespace_after_expression, + visitor, + ), + conversion=self.conversion, + format_spec=( + visit_sequence(self, "format_spec", format_spec, visitor) + if format_spec is not None + else None + ), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + state.add_token("{") + self.whitespace_before_expression._codegen(state) + self.expression._codegen(state) + equal = self.equal + if equal is not None: + equal._codegen(state) + self.whitespace_after_expression._codegen(state) + conversion = self.conversion + if conversion is not None: + state.add_token("!") + state.add_token(conversion) + format_spec = self.format_spec + if format_spec is not None: + state.add_token(":") + for spec in format_spec: + spec._codegen(state) + state.add_token("}") + + +@add_slots +@dataclass(frozen=True) +class TemplatedString(_BasePrefixedString): + """ + An "t-string". Template strings are a generalization of f-strings, + using a t in place of the f prefix. 
Instead of evaluating to str, + t-strings evaluate to a new type: Template + + T-Strings are defined in 'PEP 750' + + >>> import libcst as cst + >>> cst.parse_expression('t"ab{cd}ef"') + TemplatedString( + parts=[ + TemplatedStringText( + value='ab', + ), + TemplatedStringExpression( + expression=Name( + value='cd', + lpar=[], + rpar=[], + ), + conversion=None, + format_spec=None, + whitespace_before_expression=SimpleWhitespace( + value='', + ), + whitespace_after_expression=SimpleWhitespace( + value='', + ), + equal=None, + ), + TemplatedStringText( + value='ef', + ), + ], + start='t"', + end='"', + lpar=[], + rpar=[], + ) + >>> + """ + + #: A templated string is composed as a series of :class:`TemplatedStringText` and + #: :class:`TemplatedStringExpression` parts. + parts: Sequence[BaseTemplatedStringContent] + + #: The string prefix and the leading quote, such as ``t"``, ``T'``, ``tr"``, or + #: ``t"""``. + start: str = 't"' + + #: The trailing quote. This must match the type of quote used in ``start``. + end: Literal['"', "'", '"""', "'''"] = '"' + + lpar: Sequence[LeftParen] = () + #: Sequence of parenthesis for precidence dictation. + rpar: Sequence[RightParen] = () + + def _validate(self) -> None: + super(_BasePrefixedString, self)._validate() + + # Validate any prefix + prefix = self.prefix + if prefix not in ("t", "tr", "rt"): + raise CSTValidationError("Invalid t-string prefix.") + + # Validate wrapping quotes + starttoken = self.start[len(prefix) :] + if starttoken != self.end: + raise CSTValidationError("t-string must have matching enclosing quotes.") + + # Validate valid wrapping quote usage + if starttoken not in ('"', "'", '"""', "'''"): + raise CSTValidationError("Invalid t-string enclosing quotes.") + + @property + def prefix(self) -> str: + """ + Returns the string's prefix, if any exists. The prefix can be ``t``, + ``tr``, or ``rt``. 
+ """ + + prefix = "" + for c in self.start: + if c in ['"', "'"]: + break + prefix += c + return prefix.lower() + + @property + def quote(self) -> StringQuoteLiteral: + """ + Returns the quotation used to denote the string. Can be either ``'``, + ``"``, ``'''`` or ``\"\"\"``. + """ + + return self.end + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TemplatedString": + return TemplatedString( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + start=self.start, + parts=visit_sequence(self, "parts", self.parts, visitor), + end=self.end, + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + state.add_token(self.start) + for part in self.parts: + part._codegen(state) + state.add_token(self.end) + + @add_slots @dataclass(frozen=True) class ConcatenatedString(BaseString): @@ -998,7 +1259,7 @@ class ConcatenatedString(BaseString): elif isinstance(right, FormattedString): rightbytes = "b" in right.prefix else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") if leftbytes != rightbytes: raise CSTValidationError("Cannot concatenate string and bytes.") @@ -1020,7 +1281,7 @@ class ConcatenatedString(BaseString): self.right._codegen(state) @property - def evaluated_value(self) -> Optional[str]: + def evaluated_value(self) -> Union[str, bytes, None]: """ Return an :func:`ast.literal_eval` evaluated str of recursively concatenated :py:attr:`left` and :py:attr:`right` if and only if both :py:attr:`left` and :py:attr:`right` are composed by :class:`SimpleString` or :class:`ConcatenatedString` @@ -1034,7 +1295,11 @@ class ConcatenatedString(BaseString): right_val = right.evaluated_value if right_val is None: return None - return left_val + right_val + if isinstance(left_val, bytes) and isinstance(right_val, bytes): + return left_val + right_val + if isinstance(left_val, str) and isinstance(right_val, str): + return left_val + 
right_val + return None @add_slots @@ -1415,6 +1680,8 @@ class BaseSlice(CSTNode, ABC): This node is purely for typing. """ + __slots__ = () + @add_slots @dataclass(frozen=True) @@ -1427,10 +1694,29 @@ class Index(BaseSlice): #: The index value itself. value: BaseExpression + #: An optional string with an asterisk appearing before the name. This is + #: expanded into variable number of positional arguments. See PEP-646 + star: Optional[Literal["*"]] = None + + #: Whitespace after the ``star`` (if it exists), but before the ``value``. + whitespace_after_star: Optional[BaseParenthesizableWhitespace] = None + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Index": - return Index(value=visit_required(self, "value", self.value, visitor)) + return Index( + star=self.star, + whitespace_after_star=visit_optional( + self, "whitespace_after_star", self.whitespace_after_star, visitor + ), + value=visit_required(self, "value", self.value, visitor), + ) def _codegen_impl(self, state: CodegenState) -> None: + star = self.star + if star is not None: + state.add_token(star) + ws = self.whitespace_after_star + if ws is not None: + ws._codegen(state) self.value._codegen(state) @@ -1610,9 +1896,9 @@ class Annotation(CSTNode): #: colon or arrow. annotation: BaseExpression - whitespace_before_indicator: Union[ - BaseParenthesizableWhitespace, MaybeSentinel - ] = MaybeSentinel.DEFAULT + whitespace_before_indicator: Union[BaseParenthesizableWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) whitespace_after_indicator: BaseParenthesizableWhitespace = SimpleWhitespace.field( " " ) @@ -1651,7 +1937,7 @@ class Annotation(CSTNode): if default_indicator == "->": state.add_token(" ") else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") # Now, output the indicator and the rest of the annotation state.add_token(default_indicator) @@ -1696,15 +1982,26 @@ class ParamSlash(CSTNode): .. 
_PEP 570: https://www.python.org/dev/peps/pep-0570/#specification """ - # Optional comma that comes after the slash. + #: Optional comma that comes after the slash. This comma doesn't own the whitespace + #: between ``/`` and ``,``. comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + #: Whitespace after the ``/`` character. This is captured here in case there is a + #: comma. + whitespace_after: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ParamSlash": - return ParamSlash(comma=visit_sentinel(self, "comma", self.comma, visitor)) + return ParamSlash( + comma=visit_sentinel(self, "comma", self.comma, visitor), + whitespace_after=visit_required( + self, "whitespace_after", self.whitespace_after, visitor + ), + ) def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: state.add_token("/") + self.whitespace_after._codegen(state) comma = self.comma if comma is MaybeSentinel.DEFAULT and default_comma: state.add_token(", ") @@ -1844,7 +2141,6 @@ class Parameters(CSTNode): if len(vals) == 0: return for val in vals: - # pyre-ignore Pyre seems to think val.star.__eq__ is not callable if isinstance(val.star, str) and val.star != "": raise CSTValidationError( f"Expecting a star prefix of '' for {section} Param." @@ -1864,6 +2160,8 @@ class Parameters(CSTNode): def _validate_defaults(self) -> None: seen_default = False + # pyre-fixme[60]: Concatenation not yet support for multiple variadic + # tuples: `*self.posonly_params, *self.params`. 
for param in (*self.posonly_params, *self.params): if param.default: # Mark that we've moved onto defaults @@ -1891,7 +2189,6 @@ class Parameters(CSTNode): if ( isinstance(star_arg, Param) and isinstance(star_arg.star, str) - # pyre-ignore Pyre seems to think star_kwarg.star.__eq__ is not callable and star_arg.star != "*" ): raise CSTValidationError( @@ -1903,7 +2200,6 @@ class Parameters(CSTNode): if ( star_kwarg is not None and isinstance(star_kwarg.star, str) - # pyre-ignore Pyre seems to think star_kwarg.star.__eq__ is not callable and star_kwarg.star != "**" ): raise CSTValidationError( @@ -1934,6 +2230,25 @@ class Parameters(CSTNode): star_kwarg=visit_optional(self, "star_kwarg", self.star_kwarg, visitor), ) + def _safe_to_join_with_lambda(self) -> bool: + """ + Determine if Parameters need a space after the `lambda` keyword. Returns True + iff it's safe to omit the space between `lambda` and these Parameters. + + See also `BaseExpression._safe_to_use_with_word_operator`. + + For example: `lambda*_: pass` + """ + if len(self.posonly_params) != 0: + return False + + # posonly_ind can't appear if above condition is false + + if len(self.params) > 0 and self.params[0].star not in {"*", "**"}: + return False + + return True + def _codegen_impl(self, state: CodegenState) -> None: # noqa: C901 # Compute the star existence first so we can ask about whether # each element is the last in the list or not. @@ -2035,9 +2350,16 @@ class Lambda(BaseExpression): rpar: Sequence[RightParen] = () #: Whitespace after the lambda keyword, but before any argument or the colon. 
- whitespace_after_lambda: Union[ - BaseParenthesizableWhitespace, MaybeSentinel - ] = MaybeSentinel.DEFAULT + whitespace_after_lambda: Union[BaseParenthesizableWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) + + def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: + if position == ExpressionPosition.LEFT: + return len(self.rpar) > 0 or self.body._safe_to_use_with_word_operator( + position + ) + return super()._safe_to_use_with_word_operator(position) def _validate(self) -> None: # Validate parents @@ -2066,6 +2388,7 @@ class Lambda(BaseExpression): if ( isinstance(whitespace_after_lambda, BaseParenthesizableWhitespace) and whitespace_after_lambda.empty + and not self.params._safe_to_join_with_lambda() ): raise CSTValidationError( "Must have at least one space after lambda when specifying params" @@ -2191,12 +2514,12 @@ class _BaseExpressionWithArgs(BaseExpression, ABC): in typing. So, we have common validation functions here. """ + __slots__ = () + #: Sequence of arguments that will be passed to the function call. args: Sequence[Arg] = () - def _check_kwargs_or_keywords( - self, arg: Arg - ) -> Optional[Callable[[Arg], Callable]]: + def _check_kwargs_or_keywords(self, arg: Arg) -> None: """ Validates that we only have a mix of "keyword=arg" and "**arg" expansion. """ @@ -2220,7 +2543,7 @@ class _BaseExpressionWithArgs(BaseExpression, ABC): def _check_starred_or_keywords( self, arg: Arg - ) -> Optional[Callable[[Arg], Callable]]: + ) -> Optional[Callable[[Arg], Callable[[Arg], None]]]: """ Validates that we only have a mix of "*arg" expansion and "keyword=arg". """ @@ -2243,7 +2566,9 @@ class _BaseExpressionWithArgs(BaseExpression, ABC): "Cannot have positional argument after keyword argument." 
) - def _check_positional(self, arg: Arg) -> Optional[Callable[[Arg], Callable]]: + def _check_positional( + self, arg: Arg + ) -> Optional[Callable[[Arg], Callable[[Arg], Callable[[Arg], None]]]]: """ Validates that we only have a mix of positional args and "*arg" expansion. """ @@ -2267,6 +2592,8 @@ class _BaseExpressionWithArgs(BaseExpression, ABC): # Valid, allowed to have positional arguments here return None + # pyre-fixme[30]: Pyre gave up inferring some types - function `_validate` was + # too complex. def _validate(self) -> None: # Validate any super-class stuff, whatever it may be. super()._validate() @@ -2380,7 +2707,12 @@ class Await(BaseExpression): # Validate any super-class stuff, whatever it may be. super(Await, self)._validate() # Make sure we don't run identifiers together. - if self.whitespace_after_await.empty: + if ( + self.whitespace_after_await.empty + and not self.expression._safe_to_use_with_word_operator( + ExpressionPosition.RIGHT + ) + ): raise CSTValidationError("Must have at least one space after await") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Await": @@ -2434,6 +2766,12 @@ class IfExp(BaseExpression): #: Whitespace after the ``else`` keyword, but before the ``orelse`` expression. whitespace_after_else: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") + def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: + if position == ExpressionPosition.RIGHT: + return self.body._safe_to_use_with_word_operator(position) + else: + return self.orelse._safe_to_use_with_word_operator(position) + def _validate(self) -> None: # Paren validation and such super(IfExp, self)._validate() @@ -2512,9 +2850,9 @@ class From(CSTNode): item: BaseExpression #: The whitespace at the very start of this node. 
- whitespace_before_from: Union[ - BaseParenthesizableWhitespace, MaybeSentinel - ] = MaybeSentinel.DEFAULT + whitespace_before_from: Union[BaseParenthesizableWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) #: The whitespace after the ``from`` keyword, but before the ``item``. whitespace_after_from: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") @@ -2573,9 +2911,9 @@ class Yield(BaseExpression): rpar: Sequence[RightParen] = () #: Whitespace after the ``yield`` keyword, but before the ``value``. - whitespace_after_yield: Union[ - BaseParenthesizableWhitespace, MaybeSentinel - ] = MaybeSentinel.DEFAULT + whitespace_after_yield: Union[BaseParenthesizableWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) def _validate(self) -> None: # Paren rules and such @@ -2630,6 +2968,8 @@ class _BaseElementImpl(CSTNode, ABC): An internal base class for :class:`Element` and :class:`DictElement`. """ + __slots__ = () + value: BaseExpression comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT @@ -2657,8 +2997,7 @@ class _BaseElementImpl(CSTNode, ABC): state: CodegenState, default_comma: bool = False, default_comma_whitespace: bool = False, # False for a single-item collection - ) -> None: - ... + ) -> None: ... class BaseElement(_BaseElementImpl, ABC): @@ -2667,6 +3006,8 @@ class BaseElement(_BaseElementImpl, ABC): BaseDictElement. """ + __slots__ = () + class BaseDictElement(_BaseElementImpl, ABC): """ @@ -2674,6 +3015,8 @@ class BaseDictElement(_BaseElementImpl, ABC): BaseElement. """ + __slots__ = () + @add_slots @dataclass(frozen=True) @@ -2760,7 +3103,7 @@ class DictElement(BaseDictElement): @add_slots @dataclass(frozen=True) -class StarredElement(BaseElement, _BaseParenthesizedNode): +class StarredElement(BaseElement, BaseExpression, _BaseParenthesizedNode): """ A starred ``*value`` element that expands to represent multiple values in a literal :class:`List`, :class:`Tuple`, or :class:`Set`. 
@@ -2956,6 +3299,8 @@ class BaseList(BaseExpression, ABC): object when evaluated. """ + __slots__ = () + lbracket: LeftSquareBracket = LeftSquareBracket.field() #: Brackets surrounding the list. rbracket: RightSquareBracket = RightSquareBracket.field() @@ -3036,6 +3381,8 @@ class _BaseSetOrDict(BaseExpression, ABC): shouldn't be exported. """ + __slots__ = () + lbrace: LeftCurlyBrace = LeftCurlyBrace.field() #: Braces surrounding the set or dict. rbrace: RightCurlyBrace = RightCurlyBrace.field() @@ -3061,6 +3408,8 @@ class BaseSet(_BaseSetOrDict, ABC): a set object when evaluated. """ + __slots__ = () + @add_slots @dataclass(frozen=True) @@ -3130,6 +3479,8 @@ class BaseDict(_BaseSetOrDict, ABC): a dict object when evaluated. """ + __slots__ = () + @add_slots @dataclass(frozen=True) @@ -3406,6 +3757,8 @@ class BaseComp(BaseExpression, ABC): :class:`GeneratorExp`, :class:`ListComp`, :class:`SetComp`, and :class:`DictComp`. """ + __slots__ = () + for_in: CompFor @@ -3416,10 +3769,12 @@ class BaseSimpleComp(BaseComp, ABC): ``value``. """ + __slots__ = () + #: The expression evaluated during each iteration of the comprehension. This #: lexically comes before the ``for_in`` clause, but it is semantically the #: inner-most element, evaluated inside the ``for_in`` clause. - elt: BaseAssignTargetExpression + elt: BaseExpression #: The ``for ... in ... if ...`` clause that lexically comes after ``elt``. This may #: be a nested structure for nested comprehensions. See :class:`CompFor` for @@ -3452,7 +3807,7 @@ class GeneratorExp(BaseSimpleComp): """ #: The expression evaluated and yielded during each iteration of the generator. - elt: BaseAssignTargetExpression + elt: BaseExpression #: The ``for ... in ... if ...`` clause that comes after ``elt``. This may be a #: nested structure for nested comprehensions. See :class:`CompFor` for details. 
@@ -3503,7 +3858,7 @@ class ListComp(BaseList, BaseSimpleComp): """ #: The expression evaluated and stored during each iteration of the comprehension. - elt: BaseAssignTargetExpression + elt: BaseExpression #: The ``for ... in ... if ...`` clause that comes after ``elt``. This may be a #: nested structure for nested comprehensions. See :class:`CompFor` for details. @@ -3545,7 +3900,7 @@ class SetComp(BaseSet, BaseSimpleComp): """ #: The expression evaluated and stored during each iteration of the comprehension. - elt: BaseAssignTargetExpression + elt: BaseExpression #: The ``for ... in ... if ...`` clause that comes after ``elt``. This may be a #: nested structure for nested comprehensions. See :class:`CompFor` for details. @@ -3587,10 +3942,10 @@ class DictComp(BaseDict, BaseComp): """ #: The key inserted into the dictionary during each iteration of the comprehension. - key: BaseAssignTargetExpression + key: BaseExpression #: The value associated with the ``key`` inserted into the dictionary during each #: iteration of the comprehension. - value: BaseAssignTargetExpression + value: BaseExpression #: The ``for ... in ... if ...`` clause that lexically comes after ``key`` and #: ``value``. This may be a nested structure for nested comprehensions. 
See @@ -3694,6 +4049,15 @@ class NamedExpr(BaseExpression): rpar=visit_sequence(self, "rpar", self.rpar, visitor), ) + def _safe_to_use_with_word_operator(self, position: ExpressionPosition) -> bool: + if position == ExpressionPosition.LEFT: + return len(self.rpar) > 0 or self.value._safe_to_use_with_word_operator( + position + ) + return len(self.lpar) > 0 or self.target._safe_to_use_with_word_operator( + position + ) + def _codegen_impl(self, state: CodegenState) -> None: with self._parenthesize(state): self.target._codegen(state) diff --git a/libcst/_nodes/internal.py b/libcst/_nodes/internal.py index 4b5c7b00..35d89743 100644 --- a/libcst/_nodes/internal.py +++ b/libcst/_nodes/internal.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,14 +6,14 @@ from contextlib import contextmanager from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Iterable, Iterator, List, Optional, Sequence, Union +from typing import Iterable, Iterator, List, Optional, Sequence, TYPE_CHECKING, Union from libcst._add_slots import add_slots +from libcst._flatten_sentinel import FlattenSentinel from libcst._maybe_sentinel import MaybeSentinel from libcst._removal_sentinel import RemovalSentinel from libcst._types import CSTNodeT - if TYPE_CHECKING: # These are circular dependencies only used for typing purposes from libcst._nodes.base import CSTNode # noqa: F401 @@ -84,6 +84,13 @@ def visit_required( f"We got a RemovalSentinel while visiting a {type(node).__name__}. This " + "node's parent does not allow it to be removed." ) + elif isinstance(result, FlattenSentinel): + raise TypeError( + f"We got a FlattenSentinel while visiting a {type(node).__name__}. This " + + "node's parent does not allow for it to be it to be replaced with a " + + "sequence." 
+ ) + visitor.on_leave_attribute(parent, fieldname) return result @@ -101,6 +108,12 @@ def visit_optional( return None visitor.on_visit_attribute(parent, fieldname) result = node.visit(visitor) + if isinstance(result, FlattenSentinel): + raise TypeError( + f"We got a FlattenSentinel while visiting a {type(node).__name__}. This " + + "node's parent does not allow for it to be it to be replaced with a " + + "sequence." + ) visitor.on_leave_attribute(parent, fieldname) return None if isinstance(result, RemovalSentinel) else result @@ -121,6 +134,12 @@ def visit_sentinel( return MaybeSentinel.DEFAULT visitor.on_visit_attribute(parent, fieldname) result = node.visit(visitor) + if isinstance(result, FlattenSentinel): + raise TypeError( + f"We got a FlattenSentinel while visiting a {type(node).__name__}. This " + + "node's parent does not allow for it to be it to be replaced with a " + + "sequence." + ) visitor.on_leave_attribute(parent, fieldname) return MaybeSentinel.DEFAULT if isinstance(result, RemovalSentinel) else result @@ -138,7 +157,9 @@ def visit_iterable( visitor.on_visit_attribute(parent, fieldname) for child in children: new_child = child.visit(visitor) - if not isinstance(new_child, RemovalSentinel): + if isinstance(new_child, FlattenSentinel): + yield from new_child + elif not isinstance(new_child, RemovalSentinel): yield new_child visitor.on_leave_attribute(parent, fieldname) @@ -179,11 +200,17 @@ def visit_body_iterable( # and the new child is. This means a RemovalSentinel # caused a child of this node to be dropped, and it # is now useless. - if (not child._is_removable()) and new_child._is_removable(): - continue - # Safe to yield child in this case. - yield new_child + if isinstance(new_child, FlattenSentinel): + for child_ in new_child: + if (not child._is_removable()) and child_._is_removable(): + continue + yield child_ + else: + if (not child._is_removable()) and new_child._is_removable(): + continue + # Safe to yield child in this case. 
+ yield new_child visitor.on_leave_attribute(parent, fieldname) diff --git a/libcst/_nodes/module.py b/libcst/_nodes/module.py index 59a4507d..9ed45716 100644 --- a/libcst/_nodes/module.py +++ b/libcst/_nodes/module.py @@ -1,24 +1,23 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass -from typing import TYPE_CHECKING, Optional, Sequence, TypeVar, Union, cast +from typing import cast, Optional, Sequence, TYPE_CHECKING, TypeVar, Union from libcst._add_slots import add_slots from libcst._nodes.base import CSTNode from libcst._nodes.internal import CodegenState, visit_body_sequence, visit_sequence from libcst._nodes.statement import ( BaseCompoundStatement, - SimpleStatementLine, get_docstring_impl, + SimpleStatementLine, ) from libcst._nodes.whitespace import EmptyLine from libcst._removal_sentinel import RemovalSentinel from libcst._visitors import CSTVisitorT - if TYPE_CHECKING: # This is circular, so import the type only in type checking from libcst._parser.types.config import PartialParserConfig @@ -80,7 +79,6 @@ class Module(CSTNode): has_trailing_newline=self.has_trailing_newline, ) - # pyre-fixme[14]: `visit` overrides method defined in `CSTNode` inconsistently. def visit(self: _ModuleSelfT, visitor: CSTVisitorT) -> _ModuleSelfT: """ Returns the result of running a visitor over this module. diff --git a/libcst/_nodes/op.py b/libcst/_nodes/op.py index 54fdd73c..1765f536 100644 --- a/libcst/_nodes/op.py +++ b/libcst/_nodes/op.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -19,6 +19,8 @@ class _BaseOneTokenOp(CSTNode, ABC): Any node that has a static value and needs to own whitespace on both sides. """ + __slots__ = () + whitespace_before: BaseParenthesizableWhitespace whitespace_after: BaseParenthesizableWhitespace @@ -41,8 +43,7 @@ class _BaseOneTokenOp(CSTNode, ABC): self.whitespace_after._codegen(state) @abstractmethod - def _get_token(self) -> str: - ... + def _get_token(self) -> str: ... class _BaseTwoTokenOp(CSTNode, ABC): @@ -51,6 +52,8 @@ class _BaseTwoTokenOp(CSTNode, ABC): in beteween them. """ + __slots__ = () + whitespace_before: BaseParenthesizableWhitespace whitespace_between: BaseParenthesizableWhitespace @@ -84,8 +87,7 @@ class _BaseTwoTokenOp(CSTNode, ABC): self.whitespace_after._codegen(state) @abstractmethod - def _get_tokens(self) -> Tuple[str, str]: - ... + def _get_tokens(self) -> Tuple[str, str]: ... class BaseUnaryOp(CSTNode, ABC): @@ -93,6 +95,8 @@ class BaseUnaryOp(CSTNode, ABC): Any node that has a static value used in a :class:`UnaryOperation` expression. """ + __slots__ = () + #: Any space that appears directly after this operator. whitespace_after: BaseParenthesizableWhitespace @@ -109,8 +113,7 @@ class BaseUnaryOp(CSTNode, ABC): self.whitespace_after._codegen(state) @abstractmethod - def _get_token(self) -> str: - ... + def _get_token(self) -> str: ... class BaseBooleanOp(_BaseOneTokenOp, ABC): @@ -119,6 +122,8 @@ class BaseBooleanOp(_BaseOneTokenOp, ABC): This node is purely for typing. """ + __slots__ = () + class BaseBinaryOp(CSTNode, ABC): """ @@ -126,6 +131,8 @@ class BaseBinaryOp(CSTNode, ABC): This node is purely for typing. """ + __slots__ = () + class BaseCompOp(CSTNode, ABC): """ @@ -133,6 +140,8 @@ class BaseCompOp(CSTNode, ABC): This node is purely for typing. """ + __slots__ = () + class BaseAugOp(CSTNode, ABC): """ @@ -140,6 +149,8 @@ class BaseAugOp(CSTNode, ABC): This node is purely for typing. 
""" + __slots__ = () + @add_slots @dataclass(frozen=True) diff --git a/libcst/_nodes/statement.py b/libcst/_nodes/statement.py index 6a831b85..cdc49edc 100644 --- a/libcst/_nodes/statement.py +++ b/libcst/_nodes/statement.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,13 +6,16 @@ import inspect import re from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import Optional, Pattern, Sequence, Union +from dataclasses import dataclass, field +from typing import Literal, Optional, Pattern, Sequence, Union + +from libcst import CSTLogicError from libcst._add_slots import add_slots from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.base import CSTNode, CSTValidationError from libcst._nodes.expression import ( + _BaseParenthesizedNode, Annotation, Arg, Asynchronous, @@ -20,15 +23,18 @@ from libcst._nodes.expression import ( BaseAssignTargetExpression, BaseDelTargetExpression, BaseExpression, - Call, ConcatenatedString, ExpressionPosition, From, + LeftCurlyBrace, LeftParen, + LeftSquareBracket, List, Name, Parameters, + RightCurlyBrace, RightParen, + RightSquareBracket, SimpleString, Tuple, ) @@ -40,16 +46,25 @@ from libcst._nodes.internal import ( visit_sentinel, visit_sequence, ) -from libcst._nodes.op import AssignEqual, BaseAugOp, Comma, Dot, ImportStar, Semicolon +from libcst._nodes.op import ( + AssignEqual, + BaseAugOp, + BitOr, + Colon, + Comma, + Dot, + ImportStar, + Semicolon, +) from libcst._nodes.whitespace import ( BaseParenthesizableWhitespace, EmptyLine, + ParenthesizedWhitespace, SimpleWhitespace, TrailingWhitespace, ) from libcst._visitors import CSTVisitorT - _INDENT_WHITESPACE_RE: Pattern[str] = re.compile(r"[ \f\t]+", re.UNICODE) @@ -66,6 +81,8 @@ class BaseSuite(CSTNode, ABC): -- 
https://docs.python.org/3/reference/compound_stmts.html """ + __slots__ = () + body: Union[Sequence["BaseStatement"], Sequence["BaseSmallStatement"]] @@ -75,7 +92,7 @@ class BaseStatement(CSTNode, ABC): in a particular location. """ - pass + __slots__ = () class BaseSmallStatement(CSTNode, ABC): @@ -86,6 +103,8 @@ class BaseSmallStatement(CSTNode, ABC): simplify type definitions and isinstance checks. """ + __slots__ = () + #: An optional semicolon that appears after a small statement. This is optional #: for the last small statement in a :class:`SimpleStatementLine` or #: :class:`SimpleStatementSuite`, but all other small statements inside a simple @@ -96,8 +115,7 @@ class BaseSmallStatement(CSTNode, ABC): @abstractmethod def _codegen_impl( self, state: CodegenState, default_semicolon: bool = False - ) -> None: - ... + ) -> None: ... @add_slots @@ -256,9 +274,9 @@ class Return(BaseSmallStatement): #: Optional whitespace after the ``return`` keyword before the optional #: value expression. - whitespace_after_return: Union[ - SimpleWhitespace, MaybeSentinel - ] = MaybeSentinel.DEFAULT + whitespace_after_return: Union[SimpleWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. @@ -357,6 +375,8 @@ class _BaseSimpleStatement(CSTNode, ABC): small statement. """ + __slots__ = () + #: Sequence of small statements. All but the last statement are required to have #: a semicolon. body: Sequence[BaseSmallStatement] @@ -541,6 +561,8 @@ class BaseCompoundStatement(BaseStatement, ABC): -- https://docs.python.org/3/reference/compound_stmts.html """ + __slots__ = () + #: The body of this compound statement. body: BaseSuite @@ -578,7 +600,12 @@ class If(BaseCompoundStatement): #: The whitespace appearing after the test expression but before the colon. 
whitespace_after_test: SimpleWhitespace = SimpleWhitespace.field("") - # TODO: _validate + def _validate(self) -> None: + if ( + self.whitespace_before_test.empty + and not self.test._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) + ): + raise CSTValidationError("Must have at least one space after 'if' keyword.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "If": return If( @@ -725,12 +752,13 @@ class AsName(CSTNode): whitespace_after_as: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") def _validate(self) -> None: - if self.whitespace_after_as.empty: + if ( + self.whitespace_after_as.empty + and not self.name._safe_to_use_with_word_operator(ExpressionPosition.RIGHT) + ): raise CSTValidationError( "There must be at least one space between 'as' and name." ) - if self.whitespace_before_as.empty: - raise CSTValidationError("There must be at least one space before 'as'.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "AsName": return AsName( @@ -794,6 +822,16 @@ class ExceptHandler(CSTNode): raise CSTValidationError( "Must have at least one space after except when ExceptHandler has a type." ) + name = self.name + if ( + type_ is not None + and name is not None + and name.whitespace_before_as.empty + and not type_._safe_to_use_with_word_operator(ExpressionPosition.LEFT) + ): + raise CSTValidationError( + "Must have at least one space before as keyword in an except handler." + ) def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ExceptHandler": return ExceptHandler( @@ -830,6 +868,82 @@ class ExceptHandler(CSTNode): self.body._codegen(state) +@add_slots +@dataclass(frozen=True) +class ExceptStarHandler(CSTNode): + """ + An ``except*`` clause that appears after a :class:`TryStar` statement. + """ + + #: The body of the except. + body: BaseSuite + + #: The type of exception this catches. Can be a tuple in some cases. + type: BaseExpression + + #: The optional name that a caught exception is assigned to. 
+ name: Optional[AsName] = None + + #: Sequence of empty lines appearing before this compound statement line. + leading_lines: Sequence[EmptyLine] = () + + #: The whitespace between the ``except`` keyword and the star. + whitespace_after_except: SimpleWhitespace = SimpleWhitespace.field("") + + #: The whitespace between the star and the type. + whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field(" ") + + #: The whitespace after any type or name node (whichever comes last) and + #: the colon. + whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + + def _validate(self) -> None: + name = self.name + if name is not None and not isinstance(name.name, Name): + raise CSTValidationError( + "Must use a Name node for AsName name inside ExceptHandler." + ) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ExceptStarHandler": + return ExceptStarHandler( + leading_lines=visit_sequence( + self, "leading_lines", self.leading_lines, visitor + ), + whitespace_after_except=visit_required( + self, "whitespace_after_except", self.whitespace_after_except, visitor + ), + whitespace_after_star=visit_required( + self, "whitespace_after_star", self.whitespace_after_star, visitor + ), + type=visit_required(self, "type", self.type, visitor), + name=visit_optional(self, "name", self.name, visitor), + whitespace_before_colon=visit_required( + self, "whitespace_before_colon", self.whitespace_before_colon, visitor + ), + body=visit_required(self, "body", self.body, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + for ll in self.leading_lines: + ll._codegen(state) + state.add_indent_tokens() + + with state.record_syntactic_position(self, end_node=self.body): + state.add_token("except") + self.whitespace_after_except._codegen(state) + state.add_token("*") + self.whitespace_after_star._codegen(state) + typenode = self.type + if typenode is not None: + typenode._codegen(state) + namenode = self.name + if namenode is not 
None: + namenode._codegen(state) + self.whitespace_before_colon._codegen(state) + state.add_token(":") + self.body._codegen(state) + + @add_slots @dataclass(frozen=True) class Finally(CSTNode): @@ -874,7 +988,9 @@ class Finally(CSTNode): @dataclass(frozen=True) class Try(BaseCompoundStatement): """ - A ``try`` statement. + A regular ``try`` statement that cannot contain :class:`ExceptStar` blocks. For + ``try`` statements that can contain :class:`ExceptStar` blocks, see + :class:`TryStar`. """ #: The suite that is wrapped with a try statement. @@ -949,6 +1065,75 @@ class Try(BaseCompoundStatement): finalbody._codegen(state) +@add_slots +@dataclass(frozen=True) +class TryStar(BaseCompoundStatement): + """ + A ``try`` statement with ``except*`` blocks. + """ + + #: The suite that is wrapped with a try statement. + body: BaseSuite + + #: A list of one or more exception handlers. + handlers: Sequence[ExceptStarHandler] + + #: An optional else case. + orelse: Optional[Else] = None + + #: An optional finally case. + finalbody: Optional[Finally] = None + + #: Sequence of empty lines appearing before this compound statement line. + leading_lines: Sequence[EmptyLine] = () + + #: The whitespace that appears after the ``try`` keyword but before + #: the colon. 
+ whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + + def _validate(self) -> None: + if len(self.handlers) == 0: + raise CSTValidationError( + "A TryStar statement must have at least one ExceptHandler" + ) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TryStar": + return TryStar( + leading_lines=visit_sequence( + self, "leading_lines", self.leading_lines, visitor + ), + whitespace_before_colon=visit_required( + self, "whitespace_before_colon", self.whitespace_before_colon, visitor + ), + body=visit_required(self, "body", self.body, visitor), + handlers=visit_sequence(self, "handlers", self.handlers, visitor), + orelse=visit_optional(self, "orelse", self.orelse, visitor), + finalbody=visit_optional(self, "finalbody", self.finalbody, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + for ll in self.leading_lines: + ll._codegen(state) + state.add_indent_tokens() + + end_node = self.handlers[-1] + orelse = self.orelse + end_node = end_node if orelse is None else orelse + finalbody = self.finalbody + end_node = end_node if finalbody is None else finalbody + with state.record_syntactic_position(self, end_node=end_node): + state.add_token("try") + self.whitespace_before_colon._codegen(state) + state.add_token(":") + self.body._codegen(state) + for handler in self.handlers: + handler._codegen(state) + if orelse is not None: + orelse._codegen(state) + if finalbody is not None: + finalbody._codegen(state) + + @add_slots @dataclass(frozen=True) class ImportAlias(CSTNode): @@ -971,18 +1156,21 @@ class ImportAlias(CSTNode): def _validate(self) -> None: asname = self.asname - if asname is not None and not isinstance(asname.name, Name): - raise CSTValidationError( - "Must use a Name node for AsName name inside ImportAlias." - ) + if asname is not None: + if not isinstance(asname.name, Name): + raise CSTValidationError( + "Must use a Name node for AsName name inside ImportAlias." 
+ ) + if asname.whitespace_before_as.empty: + raise CSTValidationError( + "Must have at least one space before as keyword in an ImportAlias." + ) try: self.evaluated_name - except Exception as e: - if str(e) == "Logic error!": - raise CSTValidationError( - "The imported name must be a valid qualified name." - ) - raise e + except CSTLogicError as e: + raise CSTValidationError( + "The imported name must be a valid qualified name." + ) from e def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ImportAlias": return ImportAlias( @@ -1011,7 +1199,7 @@ class ImportAlias(CSTNode): elif isinstance(node, Attribute): return f"{self._name(node.value)}.{node.attr.value}" else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") @property def evaluated_name(self) -> str: @@ -1438,7 +1626,7 @@ class Decorator(CSTNode): #: The decorator that will return a new function wrapping the parent #: of this decorator. - decorator: Union[Name, Attribute, Call] + decorator: BaseExpression #: Line comments and empty lines before this decorator. The parent #: :class:`FunctionDef` or :class:`ClassDef` node owns leading lines before @@ -1451,19 +1639,6 @@ class Decorator(CSTNode): #: Optional trailing comment and newline following the decorator before the next line. trailing_whitespace: TrailingWhitespace = TrailingWhitespace.field() - def _validate(self) -> None: - decorator = self.decorator - if len(decorator.lpar) > 0 or len(decorator.rpar) > 0: - raise CSTValidationError( - "Cannot have parens around decorator in a Decorator." - ) - if isinstance(decorator, Call) and not isinstance( - decorator.func, (Name, Attribute) - ): - raise CSTValidationError( - "Decorator call function must be Name or Attribute node." 
- ) - def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Decorator": return Decorator( leading_lines=visit_sequence( @@ -1519,6 +1694,8 @@ def get_docstring_impl( evaluated_value = val.evaluated_value else: return None + if isinstance(evaluated_value, bytes): + return None if evaluated_value is not None and clean: return inspect.cleandoc(evaluated_value) @@ -1566,8 +1743,8 @@ class FunctionDef(BaseCompoundStatement): #: Whitespace after the ``def`` keyword and before the function name. whitespace_after_def: SimpleWhitespace = SimpleWhitespace.field(" ") - #: Whitespace after the function name and before the opening parenthesis for - #: the parameters. + #: Whitespace after the function name and before the type parameters or the opening + #: parenthesis for the parameters. whitespace_after_name: SimpleWhitespace = SimpleWhitespace.field("") #: Whitespace after the opening parenthesis for the parameters but before @@ -1578,6 +1755,13 @@ class FunctionDef(BaseCompoundStatement): #: the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + #: An optional declaration of type parameters. + type_parameters: Optional["TypeParameters"] = None + + #: Whitespace between the type parameters and the opening parenthesis for the + #: (non-type) parameters. + whitespace_after_type_parameters: SimpleWhitespace = SimpleWhitespace.field("") + def _validate(self) -> None: if len(self.name.lpar) > 0 or len(self.name.rpar) > 0: raise CSTValidationError("Cannot have parens around Name in a FunctionDef.") @@ -1586,6 +1770,15 @@ class FunctionDef(BaseCompoundStatement): "There must be at least one space between 'def' and name." 
) + if ( + self.type_parameters is None + and not self.whitespace_after_type_parameters.empty + ): + raise CSTValidationError( + "whitespace_after_type_parameters must be empty if there are no type " + "parameters in FunctionDef" + ) + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "FunctionDef": return FunctionDef( leading_lines=visit_sequence( @@ -1605,6 +1798,15 @@ class FunctionDef(BaseCompoundStatement): whitespace_after_name=visit_required( self, "whitespace_after_name", self.whitespace_after_name, visitor ), + type_parameters=visit_optional( + self, "type_parameters", self.type_parameters, visitor + ), + whitespace_after_type_parameters=visit_required( + self, + "whitespace_after_type_parameters", + self.whitespace_after_type_parameters, + visitor, + ), whitespace_before_params=visit_required( self, "whitespace_before_params", self.whitespace_before_params, visitor ), @@ -1633,6 +1835,10 @@ class FunctionDef(BaseCompoundStatement): self.whitespace_after_def._codegen(state) self.name._codegen(state) self.whitespace_after_name._codegen(state) + type_params = self.type_parameters + if type_params is not None: + type_params._codegen(state) + self.whitespace_after_type_parameters._codegen(state) state.add_token("(") self.whitespace_before_params._codegen(state) self.params._codegen(state) @@ -1694,19 +1900,34 @@ class ClassDef(BaseCompoundStatement): #: Whitespace after the ``class`` keyword and before the class name. whitespace_after_class: SimpleWhitespace = SimpleWhitespace.field(" ") - #: Whitespace after the class name and before the opening parenthesis for - #: the bases and keywords. + #: Whitespace after the class name and before the type parameters or the opening + #: parenthesis for the bases and keywords. whitespace_after_name: SimpleWhitespace = SimpleWhitespace.field("") #: Whitespace after the closing parenthesis or class name and before #: the colon. 
whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + #: An optional declaration of type parameters. + type_parameters: Optional["TypeParameters"] = None + + #: Whitespace between type parameters and opening parenthesis for the bases and + #: keywords. + whitespace_after_type_parameters: SimpleWhitespace = SimpleWhitespace.field("") + def _validate_whitespace(self) -> None: if self.whitespace_after_class.empty: raise CSTValidationError( "There must be at least one space between 'class' and name." ) + if ( + self.type_parameters is None + and not self.whitespace_after_type_parameters.empty + ): + raise CSTValidationError( + "whitespace_after_type_parameters must be empty if there are no type" + "parameters in a ClassDef" + ) def _validate_parens(self) -> None: if len(self.name.lpar) > 0 or len(self.name.rpar) > 0: @@ -1749,6 +1970,15 @@ class ClassDef(BaseCompoundStatement): whitespace_after_name=visit_required( self, "whitespace_after_name", self.whitespace_after_name, visitor ), + type_parameters=visit_optional( + self, "type_parameters", self.type_parameters, visitor + ), + whitespace_after_type_parameters=visit_required( + self, + "whitespace_after_type_parameters", + self.whitespace_after_type_parameters, + visitor, + ), lpar=visit_sentinel(self, "lpar", self.lpar, visitor), bases=visit_sequence(self, "bases", self.bases, visitor), keywords=visit_sequence(self, "keywords", self.keywords, visitor), @@ -1773,6 +2003,10 @@ class ClassDef(BaseCompoundStatement): self.whitespace_after_class._codegen(state) self.name._codegen(state) self.whitespace_after_name._codegen(state) + type_params = self.type_parameters + if type_params is not None: + type_params._codegen(state) + self.whitespace_after_type_parameters._codegen(state) lpar = self.lpar if isinstance(lpar, MaybeSentinel): if self.bases or self.keywords: @@ -1818,6 +2052,15 @@ class WithItem(CSTNode): #: other items inside a with block must contain a comma to separate them. 
comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + def _validate(self) -> None: + asname = self.asname + if ( + asname is not None + and asname.whitespace_before_as.empty + and not self.item._safe_to_use_with_word_operator(ExpressionPosition.LEFT) + ): + raise CSTValidationError("Must have at least one space before as keyword.") + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "WithItem": return WithItem( item=visit_required(self, "item", self.item, visitor), @@ -1858,24 +2101,47 @@ class With(BaseCompoundStatement): #: Sequence of empty lines appearing before this with statement. leading_lines: Sequence[EmptyLine] = () + #: Optional open parenthesis for multi-line with bindings + lpar: Union[LeftParen, MaybeSentinel] = MaybeSentinel.DEFAULT + + #: Optional close parenthesis for multi-line with bindings + rpar: Union[RightParen, MaybeSentinel] = MaybeSentinel.DEFAULT + #: Whitespace after the ``with`` keyword and before the first item. whitespace_after_with: SimpleWhitespace = SimpleWhitespace.field(" ") #: Whitespace after the last item and before the colon. whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + def _validate_parens(self) -> None: + if isinstance(self.lpar, MaybeSentinel) and isinstance(self.rpar, RightParen): + raise CSTValidationError( + "Do not mix concrete LeftParen/RightParen with MaybeSentinel." + ) + if isinstance(self.lpar, LeftParen) and isinstance(self.rpar, MaybeSentinel): + raise CSTValidationError( + "Do not mix concrete LeftParen/RightParen with MaybeSentinel." + ) + def _validate(self) -> None: + self._validate_parens() if len(self.items) == 0: raise CSTValidationError( "A With statement must have at least one WithItem." ) - if self.items[-1].comma != MaybeSentinel.DEFAULT: + if ( + isinstance(self.rpar, MaybeSentinel) + and self.items[-1].comma != MaybeSentinel.DEFAULT + ): raise CSTValidationError( - "The last WithItem in a With cannot have a trailing comma." 
+ "The last WithItem in an unparenthesized With cannot have a trailing comma." ) - if self.whitespace_after_with.empty and not self.items[ - 0 - ].item._safe_to_use_with_word_operator(ExpressionPosition.RIGHT): + if self.whitespace_after_with.empty and not ( + isinstance(self.lpar, LeftParen) + or self.items[0].item._safe_to_use_with_word_operator( + ExpressionPosition.RIGHT + ) + ): raise CSTValidationError("Must have at least one space after with keyword.") def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "With": @@ -1889,7 +2155,9 @@ class With(BaseCompoundStatement): whitespace_after_with=visit_required( self, "whitespace_after_with", self.whitespace_after_with, visitor ), + lpar=visit_sentinel(self, "lpar", self.lpar, visitor), items=visit_sequence(self, "items", self.items, visitor), + rpar=visit_sentinel(self, "rpar", self.rpar, visitor), whitespace_before_colon=visit_required( self, "whitespace_before_colon", self.whitespace_before_colon, visitor ), @@ -1901,15 +2169,36 @@ class With(BaseCompoundStatement): ll._codegen(state) state.add_indent_tokens() + needs_paren = False + for item in self.items: + comma = item.comma + if isinstance(comma, Comma): + if isinstance( + comma.whitespace_after, + (EmptyLine, TrailingWhitespace, ParenthesizedWhitespace), + ): + needs_paren = True + break + with state.record_syntactic_position(self, end_node=self.body): asynchronous = self.asynchronous if asynchronous is not None: asynchronous._codegen(state) state.add_token("with") self.whitespace_after_with._codegen(state) + lpar = self.lpar + if isinstance(lpar, LeftParen): + lpar._codegen(state) + elif needs_paren: + state.add_token("(") last_item = len(self.items) - 1 for i, item in enumerate(self.items): item._codegen(state, default_comma=(i != last_item)) + rpar = self.rpar + if isinstance(rpar, RightParen): + rpar._codegen(state) + elif needs_paren: + state.add_token(")") self.whitespace_before_colon._codegen(state) state.add_token(":") 
self.body._codegen(state) @@ -2113,9 +2402,9 @@ class Raise(BaseSmallStatement): cause: Optional[From] = None #: Any whitespace appearing between the ``raise`` keyword and the exception. - whitespace_after_raise: Union[ - SimpleWhitespace, MaybeSentinel - ] = MaybeSentinel.DEFAULT + whitespace_after_raise: Union[SimpleWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) #: Optional semicolon when this is used in a statement line. This semicolon #: owns the whitespace on both sides of it when it is used. @@ -2149,7 +2438,6 @@ class Raise(BaseSmallStatement): not isinstance(whitespace_before_from, MaybeSentinel) and whitespace_before_from.empty ) - # pyre-ignore Pyre thinks exc is Optional if has_no_gap and not exc._safe_to_use_with_word_operator( ExpressionPosition.LEFT ): @@ -2420,3 +2708,1166 @@ class Nonlocal(BaseSmallStatement): state.add_token("; ") elif isinstance(semicolon, Semicolon): semicolon._codegen(state) + + +class MatchPattern(_BaseParenthesizedNode, ABC): + """ + A base class for anything that can appear as a pattern in a :class:`Match` + statement. + """ + + __slots__ = () + + +@add_slots +@dataclass(frozen=True) +# pyre-fixme[13]: Attribute `body` is never initialized. +class Match(BaseCompoundStatement): + """ + A ``match`` statement. + """ + + #: The subject of the match. + subject: BaseExpression + + #: A non-empty list of match cases. + cases: Sequence["MatchCase"] + + #: Sequence of empty lines appearing before this compound statement line. + leading_lines: Sequence[EmptyLine] = () + + #: Whitespace between the ``match`` keyword and the subject. + whitespace_after_match: SimpleWhitespace = SimpleWhitespace.field(" ") + + #: Whitespace after the subject but before the colon. + whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + + #: Any optional trailing comment and the final ``NEWLINE`` at the end of the line. 
+ whitespace_after_colon: TrailingWhitespace = TrailingWhitespace.field() + + #: A string represents a specific indentation. A ``None`` value uses the modules's + #: default indentation. This is included because indentation is allowed to be + #: inconsistent across a file, just not ambiguously. + indent: Optional[str] = None + + #: Any trailing comments or lines after the dedent that are owned by this match + #: block. Statements own preceeding and same-line trailing comments, but not + #: trailing lines, so it falls on :class:`Match` to own it. In the case + #: that a statement follows a :class:`Match` block, that statement will own the + #: comments and lines that are at the same indent as the statement, and this + #: :class:`Match` will own the comments and lines that are indented further. + footer: Sequence[EmptyLine] = () + + def _validate(self) -> None: + if len(self.cases) == 0: + raise CSTValidationError("A match statement must have at least one case.") + + indent = self.indent + if indent is not None: + if len(indent) == 0: + raise CSTValidationError( + "A match statement must have a non-zero width indent." + ) + if _INDENT_WHITESPACE_RE.fullmatch(indent) is None: + raise CSTValidationError( + "An indent must be composed of only whitespace characters." 
+ ) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "Match": + return Match( + leading_lines=visit_sequence( + self, "leading_lines", self.leading_lines, visitor + ), + whitespace_after_match=visit_required( + self, "whitespace_after_match", self.whitespace_after_match, visitor + ), + subject=visit_required(self, "subject", self.subject, visitor), + whitespace_before_colon=visit_required( + self, "whitespace_before_colon", self.whitespace_before_colon, visitor + ), + whitespace_after_colon=visit_required( + self, "whitespace_after_colon", self.whitespace_after_colon, visitor + ), + indent=self.indent, + cases=visit_sequence(self, "cases", self.cases, visitor), + footer=visit_sequence(self, "footer", self.footer, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + for ll in self.leading_lines: + ll._codegen(state) + state.add_indent_tokens() + + with state.record_syntactic_position(self, end_node=self.cases[-1]): + state.add_token("match") + self.whitespace_after_match._codegen(state) + self.subject._codegen(state) + self.whitespace_before_colon._codegen(state) + state.add_token(":") + self.whitespace_after_colon._codegen(state) + + indent = self.indent + state.increase_indent(state.default_indent if indent is None else indent) + for c in self.cases: + c._codegen(state) + + for f in self.footer: + f._codegen(state) + + state.decrease_indent() + + +@add_slots +@dataclass(frozen=True) +class MatchCase(CSTNode): + """ + A single ``case`` block of a :class:`Match` statement. + """ + + #: The pattern that ``subject`` will be matched against. + pattern: MatchPattern + + #: The body of this case block, to be evaluated if ``pattern`` matches ``subject`` + #: and ``guard`` evaluates to a truthy value. + body: BaseSuite + + #: Optional expression that will be evaluated if ``pattern`` matches ``subject``. + guard: Optional[BaseExpression] = None + + #: Sequence of empty lines appearing before this case block. 
+ leading_lines: Sequence[EmptyLine] = () + + #: Whitespace directly after the ``case`` keyword. + whitespace_after_case: SimpleWhitespace = SimpleWhitespace.field(" ") + + #: Whitespace before the ``if`` keyword in case there's a guard expression. + whitespace_before_if: SimpleWhitespace = SimpleWhitespace.field("") + + #: Whitespace after the ``if`` keyword in case there's a guard expression. + whitespace_after_if: SimpleWhitespace = SimpleWhitespace.field("") + + #: Whitespace before the colon. + whitespace_before_colon: SimpleWhitespace = SimpleWhitespace.field("") + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode": + return MatchCase( + leading_lines=visit_sequence( + self, "leading_lines", self.leading_lines, visitor + ), + whitespace_after_case=visit_required( + self, "whitespace_after_case", self.whitespace_after_case, visitor + ), + pattern=visit_required(self, "pattern", self.pattern, visitor), + whitespace_before_if=visit_required( + self, "whitespace_before_if", self.whitespace_before_if, visitor + ), + whitespace_after_if=visit_required( + self, "whitespace_after_if", self.whitespace_after_if, visitor + ), + guard=visit_optional(self, "guard", self.guard, visitor), + whitespace_before_colon=visit_required( + self, "whitespace_before_colon", self.whitespace_before_colon, visitor + ), + body=visit_required(self, "body", self.body, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + for ll in self.leading_lines: + ll._codegen(state) + state.add_indent_tokens() + with state.record_syntactic_position(self, end_node=self.body): + state.add_token("case") + self.whitespace_after_case._codegen(state) + self.pattern._codegen(state) + + guard = self.guard + if guard is not None: + self.whitespace_before_if._codegen(state) + state.add_token("if") + self.whitespace_after_if._codegen(state) + guard._codegen(state) + else: + self.whitespace_before_if._codegen(state) + self.whitespace_after_if._codegen(state) + + 
self.whitespace_before_colon._codegen(state) + state.add_token(":") + self.body._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchValue(MatchPattern): + """ + A match literal or value pattern that compares by equality. + """ + + #: an expression to compare to + value: BaseExpression + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode": + return MatchValue(value=visit_required(self, "value", self.value, visitor)) + + def _codegen_impl(self, state: CodegenState) -> None: + with state.record_syntactic_position(self): + self.value._codegen(state) + + @property + def lpar(self) -> Sequence[LeftParen]: + return self.value.lpar + + @lpar.setter + def lpar(self, value: Sequence[LeftParen]) -> None: + self.value.lpar = value + + @property + def rpar(self) -> Sequence[RightParen]: + return self.value.rpar + + @rpar.setter + def rpar(self, value: Sequence[RightParen]) -> None: + self.value.rpar = value + + +@add_slots +@dataclass(frozen=True) +class MatchSingleton(MatchPattern): + """ + A match literal pattern that compares by identity. + """ + + #: a literal to compare to + value: Name + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "CSTNode": + return MatchSingleton(value=visit_required(self, "value", self.value, visitor)) + + def _validate(self) -> None: + if self.value.value not in {"True", "False", "None"}: + raise CSTValidationError( + "A match singleton can only be True, False, or None" + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with state.record_syntactic_position(self): + self.value._codegen(state) + + @property + def lpar(self) -> Sequence[LeftParen]: + return self.value.lpar + + @lpar.setter + def lpar(self, value: Sequence[LeftParen]) -> None: + # pyre-fixme[41]: Cannot reassign final attribute `lpar`. 
+ self.value.lpar = value + + @property + def rpar(self) -> Sequence[RightParen]: + return self.value.rpar + + @rpar.setter + def rpar(self, value: Sequence[RightParen]) -> None: + # pyre-fixme[41]: Cannot reassign final attribute `rpar`. + self.value.rpar = value + + +@add_slots +@dataclass(frozen=True) +class MatchSequenceElement(CSTNode): + """ + An element in a sequence match pattern. + """ + + value: MatchPattern + + #: An optional trailing comma. + comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + + def _visit_and_replace_children( + self, visitor: CSTVisitorT + ) -> "MatchSequenceElement": + return MatchSequenceElement( + value=visit_required(self, "value", self.value, visitor), + comma=visit_sentinel(self, "comma", self.comma, visitor), + ) + + def _codegen_impl( + self, + state: CodegenState, + default_comma: bool = False, + default_comma_whitespace: bool = True, + ) -> None: + with state.record_syntactic_position(self): + self.value._codegen(state) + comma = self.comma + if comma is MaybeSentinel.DEFAULT and default_comma: + state.add_token(", " if default_comma_whitespace else ",") + elif isinstance(comma, Comma): + comma._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchStar(CSTNode): + """ + A starred element in a sequence match pattern. Matches the rest of the sequence. + """ + + #: The name of the pattern binding. A ``None`` value represents ``*_``. + name: Optional[Name] = None + + #: An optional trailing comma. + comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + + #: Optional whitespace between the star and the name. 
+ whitespace_before_name: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchStar": + return MatchStar( + whitespace_before_name=visit_required( + self, "whitespace_before_name", self.whitespace_before_name, visitor + ), + name=visit_optional(self, "name", self.name, visitor), + comma=visit_sentinel(self, "comma", self.comma, visitor), + ) + + def _codegen_impl( + self, + state: CodegenState, + default_comma: bool = False, + default_comma_whitespace: bool = True, + ) -> None: + with state.record_syntactic_position(self): + state.add_token("*") + self.whitespace_before_name._codegen(state) + name = self.name + if name is None: + state.add_token("_") + else: + name._codegen(state) + comma = self.comma + if comma is MaybeSentinel.DEFAULT and default_comma: + state.add_token(", " if default_comma_whitespace else ",") + elif isinstance(comma, Comma): + comma._codegen(state) + + +class MatchSequence(MatchPattern, ABC): + """ + A match sequence pattern. It's either a :class:`MatchList` or a :class:`MatchTuple`. + Matches a variable length sequence if one of the patterns is a :class:`MatchStar`, + otherwise matches a fixed length sequence. + """ + + __slots__ = () + + #: Patterns to be matched against the subject elements if it is a sequence. + patterns: Sequence[Union[MatchSequenceElement, MatchStar]] + + +@add_slots +@dataclass(frozen=True) +class MatchList(MatchSequence): + """ + A list match pattern. It's either an "open sequence pattern" (without brackets) or a + regular list literal (with brackets). + """ + + #: Patterns to be matched against the subject elements if it is a sequence. + patterns: Sequence[Union[MatchSequenceElement, MatchStar]] + + #: An optional left bracket. If missing, this is an open sequence pattern. + lbracket: Optional[LeftSquareBracket] = None + + #: An optional left bracket. If missing, this is an open sequence pattern. 
+ rbracket: Optional[RightSquareBracket] = None + + #: Parenthesis at the beginning of the node + lpar: Sequence[LeftParen] = () + #: Parentheses after the pattern, but before a comma (if there is one). + rpar: Sequence[RightParen] = () + + def _validate(self) -> None: + if self.lbracket and not self.rbracket: + raise CSTValidationError("Cannot have left bracket without right bracket") + if self.rbracket and not self.lbracket: + raise CSTValidationError("Cannot have right bracket without left bracket") + + if not self.patterns and not self.lbracket: + raise CSTValidationError( + "Must have brackets if matching against empty list" + ) + + super(MatchList, self)._validate() + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchList": + return MatchList( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + lbracket=visit_optional(self, "lbracket", self.lbracket, visitor), + patterns=visit_sequence(self, "patterns", self.patterns, visitor), + rbracket=visit_optional(self, "rbracket", self.rbracket, visitor), + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + lbracket = self.lbracket + if lbracket is not None: + lbracket._codegen(state) + pats = self.patterns + for idx, pat in enumerate(pats): + pat._codegen(state, default_comma=(idx < len(pats) - 1)) + rbracket = self.rbracket + if rbracket is not None: + rbracket._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchTuple(MatchSequence): + """ + A tuple match pattern. + """ + + #: Patterns to be matched against the subject elements if it is a sequence. + patterns: Sequence[Union[MatchSequenceElement, MatchStar]] + + #: Parenthesis at the beginning of the node + lpar: Sequence[LeftParen] = field(default_factory=lambda: (LeftParen(),)) + #: Parentheses after the pattern, but before a comma (if there is one). 
+ rpar: Sequence[RightParen] = field(default_factory=lambda: (RightParen(),)) + + def _validate(self) -> None: + if len(self.lpar) < 1: + raise CSTValidationError( + "Tuple patterns must have at least pair of parenthesis" + ) + + super(MatchTuple, self)._validate() + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchTuple": + return MatchTuple( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + patterns=visit_sequence(self, "patterns", self.patterns, visitor), + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + pats = self.patterns + patlen = len(pats) + for idx, pat in enumerate(pats): + pat._codegen( + state, + default_comma=patlen == 1 or (idx < patlen - 1), + default_comma_whitespace=patlen != 1, + ) + + +@add_slots +@dataclass(frozen=True) +class MatchMappingElement(CSTNode): + """ + A ``key: value`` pair in a match mapping pattern. + """ + + key: BaseExpression + + #: The pattern to be matched corresponding to ``key``. + pattern: MatchPattern + + #: An optional trailing comma. + comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + + #: Whitespace between ``key`` and the colon. + whitespace_before_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + + #: Whitespace between the colon and ``pattern``. 
+ whitespace_after_colon: BaseParenthesizableWhitespace = SimpleWhitespace.field(" ") + + def _visit_and_replace_children( + self, visitor: CSTVisitorT + ) -> "MatchMappingElement": + return MatchMappingElement( + key=visit_required(self, "key", self.key, visitor), + whitespace_before_colon=visit_required( + self, "whitespace_before_colon", self.whitespace_before_colon, visitor + ), + whitespace_after_colon=visit_required( + self, "whitespace_after_colon", self.whitespace_after_colon, visitor + ), + pattern=visit_required(self, "pattern", self.pattern, visitor), + comma=visit_sentinel(self, "comma", self.comma, visitor), + ) + + def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: + with state.record_syntactic_position(self): + self.key._codegen(state) + self.whitespace_before_colon._codegen(state) + state.add_token(":") + self.whitespace_after_colon._codegen(state) + self.pattern._codegen(state) + comma = self.comma + if comma is MaybeSentinel.DEFAULT and default_comma: + state.add_token(", ") + elif isinstance(comma, Comma): + comma._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchMapping(MatchPattern): + """ + A match mapping pattern. + """ + + #: A sequence of mapping elements. + elements: Sequence[MatchMappingElement] = () + + #: Left curly brace at the beginning of the pattern. + lbrace: LeftCurlyBrace = LeftCurlyBrace.field() + + #: Right curly brace at the end of the pattern. + rbrace: RightCurlyBrace = RightCurlyBrace.field() + + #: An optional name to capture the remaining elements of the mapping. + rest: Optional[Name] = None + + #: Optional whitespace between stars and ``rest``. + whitespace_before_rest: SimpleWhitespace = SimpleWhitespace.field("") + + #: An optional trailing comma attached to ``rest``. 
+ trailing_comma: Optional[Comma] = None + + #: Parenthesis at the beginning of the node + lpar: Sequence[LeftParen] = () + #: Parentheses after the pattern + rpar: Sequence[RightParen] = () + + def _validate(self) -> None: + if isinstance(self.trailing_comma, Comma) and self.rest is not None: + raise CSTValidationError("Cannot have a trailing comma without **rest") + super(MatchMapping, self)._validate() + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchMapping": + return MatchMapping( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + lbrace=visit_required(self, "lbrace", self.lbrace, visitor), + elements=visit_sequence(self, "elements", self.elements, visitor), + whitespace_before_rest=visit_required( + self, "whitespace_before_rest", self.whitespace_before_rest, visitor + ), + rest=visit_optional(self, "rest", self.rest, visitor), + trailing_comma=visit_optional( + self, "trailing_comma", self.trailing_comma, visitor + ), + rbrace=visit_required(self, "rbrace", self.rbrace, visitor), + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + self.lbrace._codegen(state) + elems = self.elements + rest = self.rest + for idx, el in enumerate(elems): + el._codegen( + state, default_comma=rest is not None or idx < len(elems) - 1 + ) + + if rest is not None: + state.add_token("**") + self.whitespace_before_rest._codegen(state) + rest._codegen(state) + comma = self.trailing_comma + if comma is not None: + comma._codegen(state) + + self.rbrace._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchKeywordElement(CSTNode): + """ + A key=value pair in a :class:`MatchClass`. + """ + + key: Name + + #: The pattern to be matched against the attribute named ``key``. + pattern: MatchPattern + + #: An optional trailing comma. + comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + + #: Whitespace between ``key`` and the equals sign. 
+ whitespace_before_equal: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + + #: Whitespace between the equals sign and ``pattern``. + whitespace_after_equal: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + + def _visit_and_replace_children( + self, visitor: CSTVisitorT + ) -> "MatchKeywordElement": + return MatchKeywordElement( + key=visit_required(self, "key", self.key, visitor), + whitespace_before_equal=visit_required( + self, "whitespace_before_equal", self.whitespace_before_equal, visitor + ), + whitespace_after_equal=visit_required( + self, "whitespace_after_equal", self.whitespace_after_equal, visitor + ), + pattern=visit_required(self, "pattern", self.pattern, visitor), + comma=visit_sentinel(self, "comma", self.comma, visitor), + ) + + def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: + with state.record_syntactic_position(self): + self.key._codegen(state) + self.whitespace_before_equal._codegen(state) + state.add_token("=") + self.whitespace_after_equal._codegen(state) + self.pattern._codegen(state) + comma = self.comma + if comma is MaybeSentinel.DEFAULT and default_comma: + state.add_token(", ") + elif isinstance(comma, Comma): + comma._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchClass(MatchPattern): + """ + A match class pattern. + """ + + #: An expression giving the nominal class to be matched. + cls: BaseExpression + + #: A sequence of patterns to be matched against the class defined sequence of + #: pattern matching attributes. + patterns: Sequence[MatchSequenceElement] = () + + #: A sequence of additional attribute names and corresponding patterns to be + #: matched. + kwds: Sequence[MatchKeywordElement] = () + + #: Whitespace between the class name and the left parenthesis. + whitespace_after_cls: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + + #: Whitespace between the left parenthesis and the first pattern. 
+ whitespace_before_patterns: BaseParenthesizableWhitespace = SimpleWhitespace.field( + "" + ) + + #: Whitespace between the last pattern and the right parenthesis. + whitespace_after_kwds: BaseParenthesizableWhitespace = SimpleWhitespace.field("") + + #: Parenthesis at the beginning of the node + lpar: Sequence[LeftParen] = () + #: Parentheses after the pattern + rpar: Sequence[RightParen] = () + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchClass": + return MatchClass( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + cls=visit_required(self, "cls", self.cls, visitor), + whitespace_after_cls=visit_required( + self, "whitespace_after_cls", self.whitespace_after_cls, visitor + ), + whitespace_before_patterns=visit_required( + self, + "whitespace_before_patterns", + self.whitespace_before_patterns, + visitor, + ), + patterns=visit_sequence(self, "patterns", self.patterns, visitor), + kwds=visit_sequence(self, "kwds", self.kwds, visitor), + whitespace_after_kwds=visit_required( + self, "whitespace_after_kwds", self.whitespace_after_kwds, visitor + ), + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + self.cls._codegen(state) + self.whitespace_after_cls._codegen(state) + state.add_token("(") + self.whitespace_before_patterns._codegen(state) + pats = self.patterns + kwds = self.kwds + for idx, pat in enumerate(pats): + pat._codegen(state, default_comma=idx + 1 < len(pats) + len(kwds)) + for idx, kwd in enumerate(kwds): + kwd._codegen(state, default_comma=idx + 1 < len(kwds)) + self.whitespace_after_kwds._codegen(state) + state.add_token(")") + + +@add_slots +@dataclass(frozen=True) +class MatchAs(MatchPattern): + """ + A match "as-pattern", capture pattern, or wildcard pattern. + """ + + #: The match pattern that the subject will be matched against. If this is ``None``, + #: the node represents a capture pattern (i.e. 
a bare name) and will always succeed. + pattern: Optional[MatchPattern] = None + + #: The name that will be bound if the pattern is successful. If this is ``None``, + #: ``pattern`` must also be ``None`` and the node represents the wildcard pattern + #: (i.e. ``_``). + name: Optional[Name] = None + + #: Whitespace between ``pattern`` and the ``as`` keyword (if ``pattern`` is not + #: ``None``) + whitespace_before_as: Union[BaseParenthesizableWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) + + #: Whitespace between the ``as`` keyword and ``name`` (if ``pattern`` is not + #: ``None``) + whitespace_after_as: Union[BaseParenthesizableWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) + + #: Parenthesis at the beginning of the node + lpar: Sequence[LeftParen] = () + #: Parentheses after the pattern + rpar: Sequence[RightParen] = () + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchAs": + return MatchAs( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + pattern=visit_optional(self, "pattern", self.pattern, visitor), + whitespace_before_as=visit_sentinel( + self, "whitespace_before_as", self.whitespace_before_as, visitor + ), + whitespace_after_as=visit_sentinel( + self, "whitespace_after_as", self.whitespace_after_as, visitor + ), + name=visit_optional(self, "name", self.name, visitor), + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _validate(self) -> None: + if self.name is None and self.pattern is not None: + raise CSTValidationError("Pattern must be None if name is None") + super(MatchAs, self)._validate() + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + pat = self.pattern + name = self.name + if pat is not None: + pat._codegen(state) + ws_before = self.whitespace_before_as + if ws_before is MaybeSentinel.DEFAULT: + state.add_token(" ") + elif isinstance(ws_before, BaseParenthesizableWhitespace): + ws_before._codegen(state) + state.add_token("as") + 
ws_after = self.whitespace_after_as + if ws_after is MaybeSentinel.DEFAULT: + state.add_token(" ") + elif isinstance(ws_after, BaseParenthesizableWhitespace): + ws_after._codegen(state) + else: + ws_before = self.whitespace_before_as + if isinstance(ws_before, BaseParenthesizableWhitespace): + ws_before._codegen(state) + ws_after = self.whitespace_after_as + if isinstance(ws_after, BaseParenthesizableWhitespace): + ws_after._codegen(state) + if name is None: + state.add_token("_") + else: + name._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchOrElement(CSTNode): + """ + An element in a :class:`MatchOr` node. + """ + + pattern: MatchPattern + + #: An optional ``|`` separator. + separator: Union[BitOr, MaybeSentinel] = MaybeSentinel.DEFAULT + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchOrElement": + return MatchOrElement( + pattern=visit_required(self, "pattern", self.pattern, visitor), + separator=visit_sentinel(self, "separator", self.separator, visitor), + ) + + def _codegen_impl( + self, state: CodegenState, default_separator: bool = False + ) -> None: + with state.record_syntactic_position(self): + self.pattern._codegen(state) + sep = self.separator + if sep is MaybeSentinel.DEFAULT and default_separator: + state.add_token(" | ") + elif isinstance(sep, BitOr): + sep._codegen(state) + + +@add_slots +@dataclass(frozen=True) +class MatchOr(MatchPattern): + """ + A match "or-pattern". It matches each of its subpatterns in turn to the subject, + until one succeeds. The or-pattern is then deemed to succeed. If none of the + subpatterns succeed the or-pattern fails. + """ + + #: The subpatterns to be tried in turn. 
+ patterns: Sequence[MatchOrElement] + + #: Parenthesis at the beginning of the node + lpar: Sequence[LeftParen] = () + #: Parentheses after the pattern + rpar: Sequence[RightParen] = () + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "MatchOr": + return MatchOr( + lpar=visit_sequence(self, "lpar", self.lpar, visitor), + patterns=visit_sequence(self, "patterns", self.patterns, visitor), + rpar=visit_sequence(self, "rpar", self.rpar, visitor), + ) + + def _codegen_impl(self, state: CodegenState) -> None: + with self._parenthesize(state): + pats = self.patterns + for idx, pat in enumerate(pats): + pat._codegen(state, default_separator=idx + 1 < len(pats)) + + +@add_slots +@dataclass(frozen=True) +class TypeVar(CSTNode): + """ + A simple (non-variadic) type variable. + + Note: this node represents type a variable when declared using PEP-695 syntax. + """ + + #: The name of the type variable. + name: Name + + #: An optional bound on the type. + bound: Optional[BaseExpression] = None + + #: The colon used to separate the name and bound. If not specified, + #: :class:`MaybeSentinel` will be replaced with a colon if there is a bound, + #: otherwise will be left empty. + colon: Union[Colon, MaybeSentinel] = MaybeSentinel.DEFAULT + + def _codegen_impl(self, state: CodegenState) -> None: + with state.record_syntactic_position(self): + self.name._codegen(state) + bound = self.bound + colon = self.colon + if not isinstance(colon, MaybeSentinel): + colon._codegen(state) + else: + if bound is not None: + state.add_token(": ") + + if bound is not None: + bound._codegen(state) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeVar": + return TypeVar( + name=visit_required(self, "name", self.name, visitor), + colon=visit_sentinel(self, "colon", self.colon, visitor), + bound=visit_optional(self, "bound", self.bound, visitor), + ) + + +@add_slots +@dataclass(frozen=True) +class TypeVarTuple(CSTNode): + """ + A variadic type variable. 
+ """ + + #: The name of this type variable. + name: Name + + #: The (optional) whitespace between the star declaring this type variable as + #: variadic, and the variable's name. + whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field("") + + def _codegen_impl(self, state: CodegenState) -> None: + with state.record_syntactic_position(self): + state.add_token("*") + self.whitespace_after_star._codegen(state) + self.name._codegen(state) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeVarTuple": + return TypeVarTuple( + name=visit_required(self, "name", self.name, visitor), + whitespace_after_star=visit_required( + self, "whitespace_after_star", self.whitespace_after_star, visitor + ), + ) + + +@add_slots +@dataclass(frozen=True) +class ParamSpec(CSTNode): + """ + A parameter specification. + + Note: this node represents a parameter specification when declared using PEP-695 + syntax. + """ + + #: The name of this parameter specification. + name: Name + + #: The (optional) whitespace between the double star declaring this type variable as + #: a parameter specification, and the name. + whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field("") + + def _codegen_impl(self, state: CodegenState) -> None: + with state.record_syntactic_position(self): + state.add_token("**") + self.whitespace_after_star._codegen(state) + self.name._codegen(state) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "ParamSpec": + return ParamSpec( + name=visit_required(self, "name", self.name, visitor), + whitespace_after_star=visit_required( + self, "whitespace_after_star", self.whitespace_after_star, visitor + ), + ) + + +@add_slots +@dataclass(frozen=True) +class TypeParam(CSTNode): + """ + A single type parameter that is contained in a :class:`TypeParameters` list. + """ + + #: The actual parameter. + param: Union[TypeVar, TypeVarTuple, ParamSpec] + + #: A trailing comma. 
If one is not provided, :class:`MaybeSentinel` will be replaced + #: with a comma only if a comma is required. + comma: Union[Comma, MaybeSentinel] = MaybeSentinel.DEFAULT + + #: The equal sign used to denote assignment if there is a default. + equal: Union[AssignEqual, MaybeSentinel] = MaybeSentinel.DEFAULT + + #: The star used to denote a variadic default + star: Literal["", "*"] = "" + + #: The whitespace between the star and the type. + whitespace_after_star: SimpleWhitespace = SimpleWhitespace.field("") + + #: Any optional default value, used when the argument is not supplied. + default: Optional[BaseExpression] = None + + def _codegen_impl(self, state: CodegenState, default_comma: bool = False) -> None: + self.param._codegen(state) + + equal = self.equal + if equal is MaybeSentinel.DEFAULT and self.default is not None: + state.add_token(" = ") + elif isinstance(equal, AssignEqual): + equal._codegen(state) + + state.add_token(self.star) + self.whitespace_after_star._codegen(state) + + default = self.default + if default is not None: + default._codegen(state) + + comma = self.comma + if isinstance(comma, MaybeSentinel): + if default_comma: + state.add_token(", ") + else: + comma._codegen(state) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeParam": + ret = TypeParam( + param=visit_required(self, "param", self.param, visitor), + equal=visit_sentinel(self, "equal", self.equal, visitor), + star=self.star, + whitespace_after_star=visit_required( + self, "whitespace_after_star", self.whitespace_after_star, visitor + ), + default=visit_optional(self, "default", self.default, visitor), + comma=visit_sentinel(self, "comma", self.comma, visitor), + ) + return ret + + def _validate(self) -> None: + if self.default is None and isinstance(self.equal, AssignEqual): + raise CSTValidationError( + "Must have a default when specifying an AssignEqual." 
+ ) + if self.star and not (self.default or isinstance(self.equal, AssignEqual)): + raise CSTValidationError("Star can only be present if a default") + if isinstance(self.star, str) and self.star not in ("", "*"): + raise CSTValidationError("Must specify either '' or '*' for star.") + + +@add_slots +@dataclass(frozen=True) +class TypeParameters(CSTNode): + """ + Type parameters when specified with PEP-695 syntax. + + This node captures all specified parameters that are enclosed with square brackets. + """ + + #: The parameters within the square brackets. + params: Sequence[TypeParam] = () + + #: Opening square bracket that marks the start of these parameters. + lbracket: LeftSquareBracket = LeftSquareBracket.field() + #: Closing square bracket that marks the end of these parameters. + rbracket: RightSquareBracket = RightSquareBracket.field() + + def _codegen_impl(self, state: CodegenState) -> None: + self.lbracket._codegen(state) + params_len = len(self.params) + for idx, param in enumerate(self.params): + param._codegen(state, default_comma=idx + 1 < params_len) + self.rbracket._codegen(state) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeParameters": + return TypeParameters( + lbracket=visit_required(self, "lbracket", self.lbracket, visitor), + params=visit_sequence(self, "params", self.params, visitor), + rbracket=visit_required(self, "rbracket", self.rbracket, visitor), + ) + + +@add_slots +@dataclass(frozen=True) +class TypeAlias(BaseSmallStatement): + """ + A type alias statement. + + This node represents the ``type`` statement as specified initially by PEP-695. + Example: ``type ListOrSet[T] = list[T] | set[T]``. + """ + + #: The name being introduced in this statement. + name: Name + + #: Everything on the right hand side of the ``=``. + value: BaseExpression + + #: An optional list of type parameters, specified after the name. 
+ type_parameters: Optional[TypeParameters] = None + + #: Whitespace between the ``type`` soft keyword and the name. + whitespace_after_type: SimpleWhitespace = SimpleWhitespace.field(" ") + + #: Whitespace between the name and the type parameters (if they exist) or the ``=``. + #: If not specified, :class:`MaybeSentinel` will be replaced with a single space if + #: there are no type parameters, otherwise no spaces. + whitespace_after_name: Union[SimpleWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) + + #: Whitespace between the type parameters and the ``=``. Always empty if there are + #: no type parameters. If not specified, :class:`MaybeSentinel` will be replaced + #: with a single space if there are type parameters. + whitespace_after_type_parameters: Union[SimpleWhitespace, MaybeSentinel] = ( + MaybeSentinel.DEFAULT + ) + + #: Whitespace between the ``=`` and the value. + whitespace_after_equals: SimpleWhitespace = SimpleWhitespace.field(" ") + + #: Optional semicolon when this is used in a statement line. This semicolon + #: owns the whitespace on both sides of it when it is used. 
+ semicolon: Union[Semicolon, MaybeSentinel] = MaybeSentinel.DEFAULT + + def _validate(self) -> None: + if ( + self.type_parameters is None + and self.whitespace_after_type_parameters + not in { + SimpleWhitespace(""), + MaybeSentinel.DEFAULT, + } + ): + raise CSTValidationError( + "whitespace_after_type_parameters must be empty when there are no type parameters in a TypeAlias" + ) + + def _visit_and_replace_children(self, visitor: CSTVisitorT) -> "TypeAlias": + return TypeAlias( + whitespace_after_type=visit_required( + self, "whitespace_after_type", self.whitespace_after_type, visitor + ), + name=visit_required(self, "name", self.name, visitor), + whitespace_after_name=visit_sentinel( + self, "whitespace_after_name", self.whitespace_after_name, visitor + ), + type_parameters=visit_optional( + self, "type_parameters", self.type_parameters, visitor + ), + whitespace_after_type_parameters=visit_sentinel( + self, + "whitespace_after_type_parameters", + self.whitespace_after_type_parameters, + visitor, + ), + whitespace_after_equals=visit_required( + self, "whitespace_after_equals", self.whitespace_after_equals, visitor + ), + value=visit_required(self, "value", self.value, visitor), + semicolon=visit_sentinel(self, "semicolon", self.semicolon, visitor), + ) + + def _codegen_impl( + self, state: CodegenState, default_semicolon: bool = False + ) -> None: + with state.record_syntactic_position(self): + state.add_token("type") + self.whitespace_after_type._codegen(state) + self.name._codegen(state) + ws_after_name = self.whitespace_after_name + if isinstance(ws_after_name, MaybeSentinel): + if self.type_parameters is None: + state.add_token(" ") + else: + ws_after_name._codegen(state) + + ws_after_type_params = self.whitespace_after_type_parameters + if self.type_parameters is not None: + self.type_parameters._codegen(state) + if isinstance(ws_after_type_params, MaybeSentinel): + state.add_token(" ") + else: + ws_after_type_params._codegen(state) + + state.add_token("=") 
+ self.whitespace_after_equals._codegen(state) + self.value._codegen(state) + + semi = self.semicolon + if isinstance(semi, MaybeSentinel): + if default_semicolon: + state.add_token("; ") + else: + semi._codegen(state) diff --git a/libcst/_nodes/tests/__init__.py b/libcst/_nodes/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_nodes/tests/__init__.py +++ b/libcst/_nodes/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/base.py b/libcst/_nodes/tests/base.py index ed3b2b10..65e7059d 100644 --- a/libcst/_nodes/tests/base.py +++ b/libcst/_nodes/tests/base.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -137,7 +137,9 @@ class CSTNodeTest(UnitTest): codegen_children, msg=( "The list of children we got from `node.children` differs from the " - + "children that were visited by `node._codegen`." + + "children that were visited by `node._codegen`. This is probably " + + "due to a mismatch between _visit_and_replace_children and " + + "_codegen_impl." ), ) @@ -237,7 +239,7 @@ class CSTNodeTest(UnitTest): def assert_parses( self, code: str, - parser: Callable[[str], cst.BaseExpression], + parser: Callable[[str], cst.CSTNode], expect_success: bool, ) -> None: if not expect_success: diff --git a/libcst/_nodes/tests/test_assert.py b/libcst/_nodes/tests/test_assert.py index 4af2e53a..5d080215 100644 --- a/libcst/_nodes/tests/test_assert.py +++ b/libcst/_nodes/tests/test_assert.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_assign.py b/libcst/_nodes/tests/test_assign.py index 7b107d94..7df2fdee 100644 --- a/libcst/_nodes/tests/test_assign.py +++ b/libcst/_nodes/tests/test_assign.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -422,7 +422,7 @@ class AugAssignTest(CSTNodeTest): operator=cst.Add(), right=cst.Integer("1"), ), - operator=cst.Add(), + operator=cst.AddAssign(), value=cst.Name("y"), ) ), diff --git a/libcst/_nodes/tests/test_atom.py b/libcst/_nodes/tests/test_atom.py index 1a14e372..a33732c2 100644 --- a/libcst/_nodes/tests/test_atom.py +++ b/libcst/_nodes/tests/test_atom.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -655,6 +655,48 @@ class AtomTest(CSTNodeTest): "parser": _parse_expression_force_38, "expected_position": None, }, + { + "node": cst.FormattedString( + parts=( + cst.FormattedStringExpression( + cst.Yield( + value=cst.Integer("1"), + whitespace_after_yield=cst.SimpleWhitespace(" "), + ), + ), + ), + ), + "code": 'f"{yield 1}"', + "parser": _parse_expression_force_38, + "expected_position": None, + }, + { + "node": cst.FormattedString( + parts=( + cst.FormattedStringText("\\N{X Y}"), + cst.FormattedStringExpression( + cst.Name(value="Z"), + ), + ), + ), + "code": 'f"\\N{X Y}{Z}"', + "parser": parse_expression, + "expected_position": None, + }, + { + "node": cst.FormattedString( + parts=( + cst.FormattedStringText("\\"), + cst.FormattedStringExpression( + cst.Name(value="a"), + ), + ), + start='fr"', + ), + "code": 'fr"\\{a}"', + "parser": parse_expression, + "expected_position": None, + }, # Validate parens { "node": cst.FormattedString( @@ -697,6 +739,69 @@ class AtomTest(CSTNodeTest): "parser": parse_expression, "expected_position": None, }, + # Unpacked tuple + { + "node": cst.FormattedString( + parts=[ + cst.FormattedStringExpression( + expression=cst.Tuple( + elements=[ + cst.Element( + value=cst.Name( + value="a", + ), + comma=cst.Comma( + whitespace_before=cst.SimpleWhitespace( + value="", + ), + whitespace_after=cst.SimpleWhitespace( + value=" ", + ), + ), + ), + cst.Element( + value=cst.Name( + value="b", + ), + ), + ], + lpar=[], + rpar=[], + ), + ), + ], + start="f'", + end="'", + ), + "code": "f'{a, b}'", + "parser": parse_expression, + "expected_position": None, + }, + # Conditional expression + { + "node": cst.FormattedString( + parts=[ + cst.FormattedStringExpression( + expression=cst.IfExp( + test=cst.Name( + value="b", + ), + body=cst.Name( + value="a", + ), + orelse=cst.Name( + value="c", + ), + ), + ), + ], + start="f'", + end="'", + ), + "code": "f'{a if b else c}'", + "parser": parse_expression, + "expected_position": None, + }, # 
Concatenated strings { "node": cst.ConcatenatedString( @@ -1078,6 +1183,8 @@ class AtomTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/tests/test_attribute.py b/libcst/_nodes/tests/test_attribute.py index e3c021e3..ef017a1e 100644 --- a/libcst/_nodes/tests/test_attribute.py +++ b/libcst/_nodes/tests/test_attribute.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_await.py b/libcst/_nodes/tests/test_await.py index a07f2c62..1d52642b 100644 --- a/libcst/_nodes/tests/test_await.py +++ b/libcst/_nodes/tests/test_await.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,7 +6,7 @@ from typing import Any import libcst as cst -from libcst import PartialParserConfig, parse_expression, parse_statement +from libcst import parse_expression, parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider @@ -46,6 +46,14 @@ class AwaitTest(CSTNodeTest): ), "expected_position": CodeRange((1, 2), (1, 13)), }, + # Whitespace after await + { + "node": cst.Await( + cst.Name("foo", lpar=[cst.LeftParen()], rpar=[cst.RightParen()]), + whitespace_after_await=cst.SimpleWhitespace(""), + ), + "code": "await(foo)", + }, ) ) def test_valid_py37(self, **kwargs: Any) -> None: diff --git a/libcst/_nodes/tests/test_binary_op.py b/libcst/_nodes/tests/test_binary_op.py index 50f8ff79..f6b40daf 100644 --- a/libcst/_nodes/tests/test_binary_op.py +++ b/libcst/_nodes/tests/test_binary_op.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -174,3 +174,18 @@ class BinaryOperationTest(CSTNodeTest): ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) + + @data_provider( + ( + { + "code": '"a"' * 6000, + "parser": parse_expression, + }, + { + "code": "[_" + " for _ in _" * 6000 + "]", + "parser": parse_expression, + }, + ) + ) + def test_parse_error(self, **kwargs: Any) -> None: + self.assert_parses(**kwargs, expect_success=False) diff --git a/libcst/_nodes/tests/test_boolean_op.py b/libcst/_nodes/tests/test_boolean_op.py index bf63a49d..efde7ce4 100644 --- a/libcst/_nodes/tests/test_boolean_op.py +++ b/libcst/_nodes/tests/test_boolean_op.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_call.py b/libcst/_nodes/tests/test_call.py index c58af996..666db00d 100644 --- a/libcst/_nodes/tests/test_call.py +++ b/libcst/_nodes/tests/test_call.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_classdef.py b/libcst/_nodes/tests/test_classdef.py index 1f0c4090..2e026a6c 100644 --- a/libcst/_nodes/tests/test_classdef.py +++ b/libcst/_nodes/tests/test_classdef.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -112,6 +112,105 @@ class ClassDefCreationTest(CSTNodeTest): def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) + @data_provider( + ( + { + "node": cst.ClassDef( + cst.Name("Foo"), + cst.SimpleStatementSuite((cst.Pass(),)), + type_parameters=cst.TypeParameters( + ( + cst.TypeParam( + cst.TypeVar( + cst.Name("T"), + bound=cst.Name("int"), + colon=cst.Colon( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), + ), + cst.TypeParam( + cst.TypeVarTuple(cst.Name("Ts")), + cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), + ), + cst.TypeParam(cst.ParamSpec(cst.Name("KW"))), + ) + ), + ), + "code": "class Foo[T: int, *Ts, **KW]: pass\n", + }, + { + "node": cst.ClassDef( + cst.Name("Foo"), + cst.SimpleStatementSuite((cst.Pass(),)), + type_parameters=cst.TypeParameters( + params=( + cst.TypeParam( + param=cst.TypeVar( + cst.Name("T"), + bound=cst.Name("str"), + colon=cst.Colon( + whitespace_before=cst.SimpleWhitespace(" "), + whitespace_after=cst.ParenthesizedWhitespace( + empty_lines=(cst.EmptyLine(),), + indent=True, + ), + ), + ), + comma=cst.Comma(cst.SimpleWhitespace(" ")), + ), + cst.TypeParam( + cst.ParamSpec( + cst.Name("PS"), cst.SimpleWhitespace(" ") + ), + cst.Comma(cst.SimpleWhitespace(" ")), + ), + ) + ), + whitespace_after_type_parameters=cst.SimpleWhitespace(" "), + ), + "code": "class Foo[T :\n\nstr ,** PS ,] : pass\n", + }, + { + "node": cst.ClassDef( + cst.Name("Foo"), + cst.SimpleStatementSuite((cst.Pass(),)), + type_parameters=cst.TypeParameters( + params=( + cst.TypeParam( + param=cst.TypeVar( + cst.Name("T"), + bound=cst.Name("str"), + colon=cst.Colon( + whitespace_before=cst.SimpleWhitespace(" "), + whitespace_after=cst.ParenthesizedWhitespace( + empty_lines=(cst.EmptyLine(),), + indent=True, + ), + ), + ), + comma=cst.Comma(cst.SimpleWhitespace(" ")), + ), + cst.TypeParam( + cst.ParamSpec( + cst.Name("PS"), cst.SimpleWhitespace(" ") + ), + 
cst.Comma(cst.SimpleWhitespace(" ")), + ), + ) + ), + lpar=cst.LeftParen(), + rpar=cst.RightParen(), + whitespace_after_type_parameters=cst.SimpleWhitespace(" "), + ), + "code": "class Foo[T :\n\nstr ,** PS ,] (): pass\n", + }, + ) + ) + def test_valid_native(self, **kwargs: Any) -> None: + self.validate_node(**kwargs) + @data_provider( ( # Basic parenthesis tests. diff --git a/libcst/_nodes/tests/test_comment.py b/libcst/_nodes/tests/test_comment.py index 79393765..a2c8d2c9 100644 --- a/libcst/_nodes/tests/test_comment.py +++ b/libcst/_nodes/tests/test_comment.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_comparison.py b/libcst/_nodes/tests/test_comparison.py index b70dab1e..d4b985c7 100644 --- a/libcst/_nodes/tests/test_comparison.py +++ b/libcst/_nodes/tests/test_comparison.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_cst_node.py b/libcst/_nodes/tests/test_cst_node.py index e3cb7e9a..8cc30dc6 100644 --- a/libcst/_nodes/tests/test_cst_node.py +++ b/libcst/_nodes/tests/test_cst_node.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -10,8 +10,7 @@ import libcst as cst from libcst._removal_sentinel import RemovalSentinel from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer -from libcst.testing.utils import UnitTest, data_provider, none_throws - +from libcst.testing.utils import data_provider, none_throws, UnitTest _EMPTY_SIMPLE_WHITESPACE = cst.SimpleWhitespace("") diff --git a/libcst/_nodes/tests/test_del.py b/libcst/_nodes/tests/test_del.py index 83d97cb4..ecbebcb5 100644 --- a/libcst/_nodes/tests/test_del.py +++ b/libcst/_nodes/tests/test_del.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_dict.py b/libcst/_nodes/tests/test_dict.py index 425adb79..47cb0663 100644 --- a/libcst/_nodes/tests/test_dict.py +++ b/libcst/_nodes/tests/test_dict.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -187,4 +187,6 @@ class DictTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/tests/test_dict_comp.py b/libcst/_nodes/tests/test_dict_comp.py index 829cb96f..a753375f 100644 --- a/libcst/_nodes/tests/test_dict_comp.py +++ b/libcst/_nodes/tests/test_dict_comp.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -26,6 +26,17 @@ class DictCompTest(CSTNodeTest): "parser": parse_expression, "expected_position": CodeRange((1, 0), (1, 17)), }, + # non-trivial keys & values in DictComp + { + "node": cst.DictComp( + cst.BinaryOperation(cst.Name("k1"), cst.Add(), cst.Name("k2")), + cst.BinaryOperation(cst.Name("v1"), cst.Add(), cst.Name("v2")), + cst.CompFor(target=cst.Name("a"), iter=cst.Name("b")), + ), + "code": "{k1 + k2: v1 + v2 for a in b}", + "parser": parse_expression, + "expected_position": CodeRange((1, 0), (1, 29)), + }, # custom whitespace around colon { "node": cst.DictComp( diff --git a/libcst/_nodes/tests/test_docstring.py b/libcst/_nodes/tests/test_docstring.py index 6e05d0fa..7c87a9c0 100644 --- a/libcst/_nodes/tests/test_docstring.py +++ b/libcst/_nodes/tests/test_docstring.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,7 +9,7 @@ from typing import Optional import libcst as cst from libcst.helpers import ensure_type -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class DocstringTest(UnitTest): diff --git a/libcst/_nodes/tests/test_else.py b/libcst/_nodes/tests/test_else.py index ffaad752..d144ab81 100644 --- a/libcst/_nodes/tests/test_else.py +++ b/libcst/_nodes/tests/test_else.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_empty_line.py b/libcst/_nodes/tests/test_empty_line.py index ad4c647c..9e1a6f72 100644 --- a/libcst/_nodes/tests/test_empty_line.py +++ b/libcst/_nodes/tests/test_empty_line.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. 
and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_flatten_behavior.py b/libcst/_nodes/tests/test_flatten_behavior.py new file mode 100644 index 00000000..463c457f --- /dev/null +++ b/libcst/_nodes/tests/test_flatten_behavior.py @@ -0,0 +1,79 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Type, Union + +import libcst as cst +from libcst import FlattenSentinel, parse_expression, parse_module, RemovalSentinel +from libcst._nodes.tests.base import CSTNodeTest +from libcst._types import CSTNodeT +from libcst._visitors import CSTTransformer +from libcst.testing.utils import data_provider + + +class InsertPrintBeforeReturn(CSTTransformer): + def leave_Return( + self, original_node: cst.Return, updated_node: cst.Return + ) -> Union[cst.Return, RemovalSentinel, FlattenSentinel[cst.BaseSmallStatement]]: + return FlattenSentinel( + [ + cst.Expr(parse_expression("print('returning')")), + updated_node, + ] + ) + + +class FlattenLines(CSTTransformer): + def on_leave( + self, original_node: CSTNodeT, updated_node: CSTNodeT + ) -> Union[CSTNodeT, RemovalSentinel, FlattenSentinel[cst.SimpleStatementLine]]: + if isinstance(updated_node, cst.SimpleStatementLine): + return FlattenSentinel( + [ + cst.SimpleStatementLine( + [stmt.with_changes(semicolon=cst.MaybeSentinel.DEFAULT)] + ) + for stmt in updated_node.body + ] + ) + else: + return updated_node + + +class RemoveReturnWithEmpty(CSTTransformer): + def leave_Return( + self, original_node: cst.Return, updated_node: cst.Return + ) -> Union[cst.Return, RemovalSentinel, FlattenSentinel[cst.BaseSmallStatement]]: + return FlattenSentinel([]) + + +class FlattenBehavior(CSTNodeTest): + 
@data_provider( + ( + ("return", "print('returning'); return", InsertPrintBeforeReturn), + ( + "print('returning'); return", + "print('returning')\nreturn", + FlattenLines, + ), + ( + "print('returning')\nreturn", + "print('returning')", + RemoveReturnWithEmpty, + ), + ) + ) + def test_flatten_pass_behavior( + self, before: str, after: str, visitor: Type[CSTTransformer] + ) -> None: + # Test doesn't have newline termination case + before_module = parse_module(before) + after_module = before_module.visit(visitor()) + self.assertEqual(after, after_module.code) + + # Test does have newline termination case + before_module = parse_module(before + "\n") + after_module = before_module.visit(visitor()) + self.assertEqual(after + "\n", after_module.code) diff --git a/libcst/_nodes/tests/test_for.py b/libcst/_nodes/tests/test_for.py index ffc91b48..c4d1421b 100644 --- a/libcst/_nodes/tests/test_for.py +++ b/libcst/_nodes/tests/test_for.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,7 @@ from typing import Any import libcst as cst -from libcst import PartialParserConfig, parse_statement +from libcst import parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.metadata import CodeRange from libcst.testing.utils import data_provider diff --git a/libcst/_nodes/tests/test_funcdef.py b/libcst/_nodes/tests/test_funcdef.py index a5d0d080..4ed7fcc3 100644 --- a/libcst/_nodes/tests/test_funcdef.py +++ b/libcst/_nodes/tests/test_funcdef.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -622,6 +622,46 @@ class FunctionDefCreationTest(CSTNodeTest): "code": "@ bar ( )\n", "expected_position": CodeRange((1, 0), (1, 10)), }, + # Allow nested calls on decorator + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters(), + cst.SimpleStatementSuite((cst.Pass(),)), + (cst.Decorator(cst.Call(func=cst.Call(func=cst.Name("bar")))),), + ), + "code": "@bar()()\ndef foo(): pass\n", + }, + # Allow any expression in decorator + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters(), + cst.SimpleStatementSuite((cst.Pass(),)), + ( + cst.Decorator( + cst.BinaryOperation(cst.Name("a"), cst.Add(), cst.Name("b")) + ), + ), + ), + "code": "@a + b\ndef foo(): pass\n", + }, + # Allow parentheses around decorator + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters(), + cst.SimpleStatementSuite((cst.Pass(),)), + ( + cst.Decorator( + cst.Name( + "bar", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) + ) + ), + ), + ), + "code": "@(bar)\ndef foo(): pass\n", + }, # Parameters { "node": cst.Parameters( @@ -700,6 +740,154 @@ class FunctionDefCreationTest(CSTNodeTest): ) ) def test_valid(self, **kwargs: Any) -> None: + if "native_only" in kwargs: + kwargs.pop("native_only") + self.validate_node(**kwargs) + + @data_provider( + ( + # PEP 646 + { + "node": cst.FunctionDef( + name=cst.Name(value="foo"), + params=cst.Parameters( + params=[], + star_arg=cst.Param( + star="*", + name=cst.Name("a"), + annotation=cst.Annotation( + cst.StarredElement(value=cst.Name("b")), + whitespace_before_indicator=cst.SimpleWhitespace(""), + ), + ), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + "parser": parse_statement, + "code": "def foo(*a: *b): pass\n", + }, + { + "node": cst.FunctionDef( + name=cst.Name(value="foo"), + params=cst.Parameters( + params=[], + star_arg=cst.Param( + star="*", + name=cst.Name("a"), + annotation=cst.Annotation( + cst.StarredElement( + value=cst.Subscript( + value=cst.Name("tuple"), + slice=[ + cst.SubscriptElement( 
+ cst.Index(cst.Name("int")), + comma=cst.Comma(), + ), + cst.SubscriptElement( + cst.Index( + value=cst.Name("Ts"), + star="*", + whitespace_after_star=cst.SimpleWhitespace( + "" + ), + ), + comma=cst.Comma(), + ), + cst.SubscriptElement( + cst.Index(cst.Ellipsis()) + ), + ], + ) + ), + whitespace_before_indicator=cst.SimpleWhitespace(""), + ), + ), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + "parser": parse_statement, + "code": "def foo(*a: *tuple[int,*Ts,...]): pass\n", + }, + # Single type variable + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters(), + cst.SimpleStatementSuite((cst.Pass(),)), + type_parameters=cst.TypeParameters( + (cst.TypeParam(cst.TypeVar(cst.Name("T"))),) + ), + ), + "code": "def foo[T](): pass\n", + "parser": parse_statement, + }, + # All the type parameters + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters(), + cst.SimpleStatementSuite((cst.Pass(),)), + type_parameters=cst.TypeParameters( + ( + cst.TypeParam( + cst.TypeVar( + cst.Name("T"), + bound=cst.Name("int"), + colon=cst.Colon( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), + ), + cst.TypeParam( + cst.TypeVarTuple(cst.Name("Ts")), + cst.Comma(whitespace_after=cst.SimpleWhitespace(" ")), + ), + cst.TypeParam(cst.ParamSpec(cst.Name("KW"))), + ) + ), + ), + "code": "def foo[T: int, *Ts, **KW](): pass\n", + "parser": parse_statement, + }, + # Type parameters with whitespace + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters(), + cst.SimpleStatementSuite((cst.Pass(),)), + type_parameters=cst.TypeParameters( + params=( + cst.TypeParam( + param=cst.TypeVar( + cst.Name("T"), + bound=cst.Name("str"), + colon=cst.Colon( + whitespace_before=cst.SimpleWhitespace(" "), + whitespace_after=cst.ParenthesizedWhitespace( + empty_lines=(cst.EmptyLine(),), + indent=True, + ), + ), + ), + comma=cst.Comma(cst.SimpleWhitespace(" ")), + ), + cst.TypeParam( + cst.ParamSpec( + 
cst.Name("PS"), cst.SimpleWhitespace(" ") + ), + cst.Comma(cst.SimpleWhitespace(" ")), + ), + ) + ), + whitespace_after_type_parameters=cst.SimpleWhitespace(" "), + ), + "code": "def foo[T :\n\nstr ,** PS ,] (): pass\n", + "parser": parse_statement, + }, + ) + ) + def test_valid_native(self, **kwargs: Any) -> None: self.validate_node(**kwargs) @data_provider( @@ -846,22 +1034,6 @@ class FunctionDefCreationTest(CSTNodeTest): ), r"Expecting a star prefix of '\*\*'", ), - # Validate decorator name semantics - ( - lambda: cst.FunctionDef( - cst.Name("foo"), - cst.Parameters(), - cst.SimpleStatementSuite((cst.Pass(),)), - ( - cst.Decorator( - cst.Name( - "bar", lpar=(cst.LeftParen(),), rpar=(cst.RightParen(),) - ) - ), - ), - ), - "Cannot have parens around decorator in a Decorator", - ), ) ) def test_invalid( @@ -875,7 +1047,9 @@ def _parse_statement_force_38(code: str) -> cst.BaseCompoundStatement: code, config=cst.PartialParserConfig(python_version="3.8") ) if not isinstance(statement, cst.BaseCompoundStatement): - raise Exception("This function is expecting to parse compound statements only!") + raise ValueError( + "This function is expecting to parse compound statements only!" 
+ ) return statement @@ -1798,6 +1972,36 @@ class FunctionDefParserTest(CSTNodeTest): ), "code": "def foo(bar, baz, /): pass\n", }, + # Positional only params with whitespace after but no comma + { + "node": cst.FunctionDef( + cst.Name("foo"), + cst.Parameters( + posonly_params=( + cst.Param( + cst.Name("bar"), + star="", + comma=cst.Comma( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + cst.Param( + cst.Name("baz"), + star="", + comma=cst.Comma( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + ), + posonly_ind=cst.ParamSlash( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + cst.SimpleStatementSuite((cst.Pass(),)), + ), + "code": "def foo(bar, baz, / ): pass\n", + "native_only": True, + }, # Typed positional only params { "node": cst.FunctionDef( @@ -2013,7 +2217,7 @@ class FunctionDefParserTest(CSTNodeTest): }, ) ) - def test_valid_38(self, node: cst.CSTNode, code: str) -> None: + def test_valid_38(self, node: cst.CSTNode, code: str, **kwargs: Any) -> None: self.validate_node(node, code, _parse_statement_force_38) @data_provider( @@ -2041,4 +2245,23 @@ class FunctionDefParserTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) + + @data_provider( + ( + {"code": "A[:*b]"}, + {"code": "A[*b:]"}, + {"code": "A[*b:*b]"}, + {"code": "A[*(1:2)]"}, + {"code": "A[*:]"}, + {"code": "A[:*]"}, + {"code": "A[**b]"}, + {"code": "def f(x: *b): pass"}, + {"code": "def f(**x: *b): pass"}, + {"code": "x: *b"}, + ) + ) + def test_parse_error(self, **kwargs: Any) -> None: + self.assert_parses(**kwargs, expect_success=False, parser=parse_statement) diff --git a/libcst/_nodes/tests/test_global.py b/libcst/_nodes/tests/test_global.py index 038c0368..da76f9a3 100644 --- a/libcst/_nodes/tests/test_global.py +++ b/libcst/_nodes/tests/test_global.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. 
and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_if.py b/libcst/_nodes/tests/test_if.py index 52dffa33..7615614e 100644 --- a/libcst/_nodes/tests/test_if.py +++ b/libcst/_nodes/tests/test_if.py @@ -1,9 +1,9 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Any +from typing import Any, Callable import libcst as cst from libcst import parse_statement @@ -129,3 +129,21 @@ class IfTest(CSTNodeTest): ) def test_valid(self, **kwargs: Any) -> None: self.validate_node(**kwargs) + + @data_provider( + ( + # Validate whitespace handling + ( + lambda: cst.If( + cst.Name("conditional"), + cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_before_test=cst.SimpleWhitespace(""), + ), + "Must have at least one space after 'if' keyword.", + ), + ) + ) + def test_invalid( + self, get_node: Callable[[], cst.CSTNode], expected_re: str + ) -> None: + self.assert_invalid(get_node, expected_re) diff --git a/libcst/_nodes/tests/test_ifexp.py b/libcst/_nodes/tests/test_ifexp.py index ef65b9d5..dd260ef3 100644 --- a/libcst/_nodes/tests/test_ifexp.py +++ b/libcst/_nodes/tests/test_ifexp.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -52,6 +52,41 @@ class IfExpTest(CSTNodeTest): "(foo)if(bar)else(baz)", CodeRange((1, 0), (1, 21)), ), + ( + cst.IfExp( + body=cst.Name("foo"), + whitespace_before_if=cst.SimpleWhitespace(" "), + whitespace_after_if=cst.SimpleWhitespace(" "), + test=cst.Name("bar"), + whitespace_before_else=cst.SimpleWhitespace(" "), + whitespace_after_else=cst.SimpleWhitespace(""), + orelse=cst.IfExp( + body=cst.SimpleString("''"), + whitespace_before_if=cst.SimpleWhitespace(""), + test=cst.Name("bar"), + orelse=cst.Name("baz"), + ), + ), + "foo if bar else''if bar else baz", + CodeRange((1, 0), (1, 32)), + ), + ( + cst.GeneratorExp( + elt=cst.IfExp( + body=cst.Name("foo"), + test=cst.Name("bar"), + orelse=cst.SimpleString("''"), + whitespace_after_else=cst.SimpleWhitespace(""), + ), + for_in=cst.CompFor( + target=cst.Name("_"), + iter=cst.Name("_"), + whitespace_before=cst.SimpleWhitespace(""), + ), + ), + "(foo if bar else''for _ in _)", + CodeRange((1, 1), (1, 28)), + ), # Make sure that spacing works ( cst.IfExp( diff --git a/libcst/_nodes/tests/test_import.py b/libcst/_nodes/tests/test_import.py index f911029c..d6ad8cbe 100644 --- a/libcst/_nodes/tests/test_import.py +++ b/libcst/_nodes/tests/test_import.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -195,6 +195,20 @@ class ImportCreateTest(CSTNodeTest): ), "expected_re": "at least one space", }, + { + "get_node": lambda: cst.Import( + names=( + cst.ImportAlias( + cst.Name("foo"), + asname=cst.AsName( + cst.Name("bar"), + whitespace_before_as=cst.SimpleWhitespace(""), + ), + ), + ), + ), + "expected_re": "at least one space", + }, { "get_node": lambda: cst.Import( names=[ @@ -564,6 +578,25 @@ class ImportFromCreateTest(CSTNodeTest): ), "expected_re": "one space after import", }, + { + "get_node": lambda: cst.ImportFrom( + module=cst.Name("foo"), + names=( + cst.ImportAlias( + cst.Name("bar"), + asname=cst.AsName( + cst.Name( + "baz", + lpar=(cst.LeftParen(),), + rpar=(cst.RightParen(),), + ), + whitespace_before_as=cst.SimpleWhitespace(""), + ), + ), + ), + ), + "expected_re": "one space before as keyword", + }, ) ) def test_invalid(self, **kwargs: Any) -> None: @@ -617,8 +650,10 @@ class ImportFromParseTest(CSTNodeTest): ), cst.ImportAlias(cst.Name("baz"), comma=cst.Comma()), ), + lpar=cst.LeftParen(), + rpar=cst.RightParen(), ), - "code": "from foo import bar, baz,", + "code": "from foo import (bar, baz,)", }, # Star import statement { diff --git a/libcst/_nodes/tests/test_indented_block.py b/libcst/_nodes/tests/test_indented_block.py index 4397c864..8190f2d9 100644 --- a/libcst/_nodes/tests/test_indented_block.py +++ b/libcst/_nodes/tests/test_indented_block.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_lambda.py b/libcst/_nodes/tests/test_lambda.py index 5ff75f1d..64a561ed 100644 --- a/libcst/_nodes/tests/test_lambda.py +++ b/libcst/_nodes/tests/test_lambda.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -30,6 +30,22 @@ class LambdaCreationTest(CSTNodeTest): ), "code": "lambda bar, baz, /: 5", }, + # Test basic positional only params with extra trailing whitespace + { + "node": cst.Lambda( + cst.Parameters( + posonly_params=( + cst.Param(cst.Name("bar")), + cst.Param(cst.Name("baz")), + ), + posonly_ind=cst.ParamSlash( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + cst.Integer("5"), + ), + "code": "lambda bar, baz, / : 5", + }, # Test basic positional params ( cst.Lambda( @@ -287,30 +303,6 @@ class LambdaCreationTest(CSTNodeTest): ), "at least one space after lambda", ), - ( - lambda: cst.Lambda( - cst.Parameters(star_arg=cst.Param(cst.Name("arg"))), - cst.Integer("5"), - whitespace_after_lambda=cst.SimpleWhitespace(""), - ), - "at least one space after lambda", - ), - ( - lambda: cst.Lambda( - cst.Parameters(kwonly_params=(cst.Param(cst.Name("arg")),)), - cst.Integer("5"), - whitespace_after_lambda=cst.SimpleWhitespace(""), - ), - "at least one space after lambda", - ), - ( - lambda: cst.Lambda( - cst.Parameters(star_kwarg=cst.Param(cst.Name("arg"))), - cst.Integer("5"), - whitespace_after_lambda=cst.SimpleWhitespace(""), - ), - "at least one space after lambda", - ), ( lambda: cst.Lambda( cst.Parameters( @@ -928,6 +920,53 @@ class LambdaParserTest(CSTNodeTest): ), "( lambda : 5 )", ), + # No space between lambda and params + ( + cst.Lambda( + cst.Parameters(star_arg=cst.Param(cst.Name("args"), star="*")), + cst.Integer("5"), + whitespace_after_lambda=cst.SimpleWhitespace(""), + ), + "lambda*args: 5", + ), + ( + cst.Lambda( + cst.Parameters(star_kwarg=cst.Param(cst.Name("kwargs"), star="**")), + cst.Integer("5"), + whitespace_after_lambda=cst.SimpleWhitespace(""), + ), + "lambda**kwargs: 5", + ), + ( + cst.Lambda( + cst.Parameters( + star_arg=cst.ParamStar( + comma=cst.Comma( + cst.SimpleWhitespace(""), cst.SimpleWhitespace("") 
+ ) + ), + kwonly_params=[cst.Param(cst.Name("args"), star="")], + ), + cst.Integer("5"), + whitespace_after_lambda=cst.SimpleWhitespace(""), + ), + "lambda*,args: 5", + ), + ( + cst.ListComp( + elt=cst.Lambda( + params=cst.Parameters(), + body=cst.Tuple(()), + colon=cst.Colon(), + ), + for_in=cst.CompFor( + target=cst.Name("_"), + iter=cst.Name("_"), + whitespace_before=cst.SimpleWhitespace(""), + ), + ), + "[lambda:()for _ in _]", + ), ) ) def test_valid( diff --git a/libcst/_nodes/tests/test_leaf_small_statements.py b/libcst/_nodes/tests/test_leaf_small_statements.py index 9ab3e499..fb202002 100644 --- a/libcst/_nodes/tests/test_leaf_small_statements.py +++ b/libcst/_nodes/tests/test_leaf_small_statements.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_list.py b/libcst/_nodes/tests/test_list.py index a4a08b95..2f96124c 100644 --- a/libcst/_nodes/tests/test_list.py +++ b/libcst/_nodes/tests/test_list.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -13,7 +13,6 @@ from libcst.testing.utils import data_provider class ListTest(CSTNodeTest): - # A lot of Element/StarredElement tests are provided by the tests for Tuple, so we # we don't need to duplicate them here. 
@data_provider( @@ -126,4 +125,6 @@ class ListTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/tests/test_match.py b/libcst/_nodes/tests/test_match.py new file mode 100644 index 00000000..2335b7c3 --- /dev/null +++ b/libcst/_nodes/tests/test_match.py @@ -0,0 +1,489 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, Callable + +import libcst as cst +from libcst import parse_statement +from libcst._nodes.tests.base import CSTNodeTest +from libcst.testing.utils import data_provider + +parser: Callable[[str], cst.CSTNode] = parse_statement + + +class MatchTest(CSTNodeTest): + @data_provider( + ( + # Values and singletons + { + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( + pattern=cst.MatchSingleton(cst.Name("None")), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( + pattern=cst.MatchValue(cst.SimpleString('"foo"')), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ], + ), + "code": "match x:\n" + + " case None: pass\n" + + ' case "foo": pass\n', + "parser": parser, + }, + # Parenthesized value + { + "node": cst.Match( + subject=cst.Name( + value="x", + ), + cases=[ + cst.MatchCase( + pattern=cst.MatchAs( + pattern=cst.MatchValue( + value=cst.Integer( + value="1", + lpar=[ + cst.LeftParen(), + ], + rpar=[ + cst.RightParen(), + ], + ), + ), + name=cst.Name( + value="z", + ), + whitespace_before_as=cst.SimpleWhitespace(" "), + whitespace_after_as=cst.SimpleWhitespace(" "), + ), + body=cst.SimpleStatementSuite([cst.Pass()]), + ), + ], + ), + "code": "match x:\n case (1) as z: pass\n", + "parser": parser, + }, + # List patterns + { + "node": cst.Match( + subject=cst.Name("x"), + 
cases=[ + cst.MatchCase( # empty list + pattern=cst.MatchList( + [], + lbracket=cst.LeftSquareBracket(), + rbracket=cst.RightSquareBracket(), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single element list + pattern=cst.MatchList( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")) + ) + ], + lbracket=cst.LeftSquareBracket(), + rbracket=cst.RightSquareBracket(), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single element list with trailing comma + pattern=cst.MatchList( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + cst.Comma(), + ) + ], + lbracket=cst.LeftSquareBracket(), + rbracket=cst.RightSquareBracket(), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ], + ), + "code": ( + "match x:\n" + + " case []: pass\n" + + " case [None]: pass\n" + + " case [None,]: pass\n" + ), + "parser": parser, + }, + # Tuple patterns + { + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( # empty tuple + pattern=cst.MatchTuple( + [], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # two element tuple + pattern=cst.MatchTuple( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + cst.Comma(), + ), + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + ), + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single element tuple with trailing comma + pattern=cst.MatchTuple( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + cst.Comma(), + ) + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # two element tuple + pattern=cst.MatchTuple( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + cst.Comma(), + ), + cst.MatchStar( + comma=cst.Comma(), + ), + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + ), + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + 
], + ), + "code": ( + "match x:\n" + + " case (): pass\n" + + " case (None,None): pass\n" + + " case (None,): pass\n" + + " case (None,*_,None): pass\n" + ), + "parser": parser, + }, + # Mapping patterns + { + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( # empty mapping + pattern=cst.MatchMapping( + [], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # two element mapping + pattern=cst.MatchMapping( + [ + cst.MatchMappingElement( + key=cst.SimpleString('"a"'), + pattern=cst.MatchSingleton(cst.Name("None")), + comma=cst.Comma(), + ), + cst.MatchMappingElement( + key=cst.SimpleString('"b"'), + pattern=cst.MatchSingleton(cst.Name("None")), + ), + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single element mapping with trailing comma + pattern=cst.MatchMapping( + [ + cst.MatchMappingElement( + key=cst.SimpleString('"a"'), + pattern=cst.MatchSingleton(cst.Name("None")), + comma=cst.Comma(), + ) + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # rest + pattern=cst.MatchMapping( + rest=cst.Name("rest"), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ], + ), + "code": ( + "match x:\n" + + " case {}: pass\n" + + ' case {"a": None,"b": None}: pass\n' + + ' case {"a": None,}: pass\n' + + " case {**rest}: pass\n" + ), + "parser": parser, + }, + # Class patterns + { + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( # empty class + pattern=cst.MatchClass( + cls=cst.Attribute(cst.Name("a"), cst.Name("b")), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single pattern class + pattern=cst.MatchClass( + cls=cst.Attribute(cst.Name("a"), cst.Name("b")), + patterns=[ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")) + ) + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single pattern class with trailing comma + pattern=cst.MatchClass( + 
cls=cst.Attribute(cst.Name("a"), cst.Name("b")), + patterns=[ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + comma=cst.Comma(), + ) + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single keyword pattern class + pattern=cst.MatchClass( + cls=cst.Attribute(cst.Name("a"), cst.Name("b")), + kwds=[ + cst.MatchKeywordElement( + key=cst.Name("foo"), + pattern=cst.MatchSingleton(cst.Name("None")), + ) + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # single keyword pattern class with trailing comma + pattern=cst.MatchClass( + cls=cst.Attribute(cst.Name("a"), cst.Name("b")), + kwds=[ + cst.MatchKeywordElement( + key=cst.Name("foo"), + pattern=cst.MatchSingleton(cst.Name("None")), + comma=cst.Comma(), + ) + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( # now all at once + pattern=cst.MatchClass( + cls=cst.Attribute(cst.Name("a"), cst.Name("b")), + patterns=[ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + cst.Comma(), + ), + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")), + cst.Comma(), + ), + ], + kwds=[ + cst.MatchKeywordElement( + key=cst.Name("foo"), + pattern=cst.MatchSingleton(cst.Name("None")), + comma=cst.Comma(), + ), + cst.MatchKeywordElement( + key=cst.Name("bar"), + pattern=cst.MatchSingleton(cst.Name("None")), + comma=cst.Comma(), + ), + ], + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ], + ), + "code": ( + "match x:\n" + + " case a.b(): pass\n" + + " case a.b(None): pass\n" + + " case a.b(None,): pass\n" + + " case a.b(foo=None): pass\n" + + " case a.b(foo=None,): pass\n" + + " case a.b(None,None,foo=None,bar=None,): pass\n" + ), + "parser": parser, + }, + # as pattern + { + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( + pattern=cst.MatchAs(), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( + pattern=cst.MatchAs(name=cst.Name("foo")), + 
body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( + pattern=cst.MatchAs( + pattern=cst.MatchSingleton(cst.Name("None")), + name=cst.Name("bar"), + whitespace_before_as=cst.SimpleWhitespace(" "), + whitespace_after_as=cst.SimpleWhitespace(" "), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ], + ), + "code": "match x:\n" + + " case _: pass\n" + + " case foo: pass\n" + + " case None as bar: pass\n", + "parser": parser, + }, + # or pattern + { + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( + pattern=cst.MatchOr( + [ + cst.MatchOrElement( + cst.MatchSingleton(cst.Name("None")), + cst.BitOr(), + ), + cst.MatchOrElement( + cst.MatchSingleton(cst.Name("False")), + cst.BitOr(), + ), + cst.MatchOrElement( + cst.MatchSingleton(cst.Name("True")) + ), + ] + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ) + ], + ), + "code": "match x:\n case None | False | True: pass\n", + "parser": parser, + }, + { # exercise sentinels + "node": cst.Match( + subject=cst.Name("x"), + cases=[ + cst.MatchCase( + pattern=cst.MatchList( + [cst.MatchStar(), cst.MatchStar()], + lbracket=None, + rbracket=None, + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( + pattern=cst.MatchTuple( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")) + ) + ] + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( + pattern=cst.MatchAs( + pattern=cst.MatchTuple( + [ + cst.MatchSequenceElement( + cst.MatchSingleton(cst.Name("None")) + ) + ] + ), + name=cst.Name("bar"), + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + cst.MatchCase( + pattern=cst.MatchOr( + [ + cst.MatchOrElement( + cst.MatchSingleton(cst.Name("None")), + ), + cst.MatchOrElement( + cst.MatchSingleton(cst.Name("False")), + ), + cst.MatchOrElement( + cst.MatchSingleton(cst.Name("True")) + ), + ] + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ], + ), + "code": "match x:\n" + + " case *_, *_: pass\n" + + " 
case (None,): pass\n" + + " case (None,) as bar: pass\n" + + " case None | False | True: pass\n", + "parser": None, + }, + # Match without whitespace between keyword and the expr + { + "node": cst.Match( + subject=cst.Name( + "x", lpar=[cst.LeftParen()], rpar=[cst.RightParen()] + ), + cases=[ + cst.MatchCase( + pattern=cst.MatchSingleton( + cst.Name( + "None", + lpar=[cst.LeftParen()], + rpar=[cst.RightParen()], + ) + ), + body=cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_after_case=cst.SimpleWhitespace( + value="", + ), + ), + ], + whitespace_after_match=cst.SimpleWhitespace( + value="", + ), + ), + "code": "match(x):\n case(None): pass\n", + "parser": parser, + }, + ) + ) + def test_valid(self, **kwargs: Any) -> None: + self.validate_node(**kwargs) diff --git a/libcst/_nodes/tests/test_matrix_multiply.py b/libcst/_nodes/tests/test_matrix_multiply.py index 9f50dd28..500b7aab 100644 --- a/libcst/_nodes/tests/test_matrix_multiply.py +++ b/libcst/_nodes/tests/test_matrix_multiply.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -69,4 +69,6 @@ class NamedExprTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/tests/test_module.py b/libcst/_nodes/tests/test_module.py index 671a23a6..40de8f8e 100644 --- a/libcst/_nodes/tests/test_module.py +++ b/libcst/_nodes/tests/test_module.py @@ -1,13 +1,14 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-from typing import Tuple, cast +from typing import cast, Tuple import libcst as cst from libcst import parse_module, parse_statement from libcst._nodes.tests.base import CSTNodeTest + from libcst.metadata import CodeRange, MetadataWrapper, PositionProvider from libcst.testing.utils import data_provider @@ -83,6 +84,7 @@ class ModuleTest(CSTNodeTest): "empty_program_with_newline": { "code": "\n", "expected": cst.Module([], has_trailing_newline=True), + "enabled_for_native": False, }, "empty_program_with_comments": { "code": "# some comment\n", @@ -112,7 +114,11 @@ class ModuleTest(CSTNodeTest): }, } ) - def test_parser(self, *, code: str, expected: cst.Module) -> None: + def test_parser( + self, *, code: str, expected: cst.Module, enabled_for_native: bool = True + ) -> None: + if not enabled_for_native: + self.skipTest("Disabled for native parser") self.assertEqual(parse_module(code), expected) @data_provider( diff --git a/libcst/_nodes/tests/test_namedexpr.py b/libcst/_nodes/tests/test_namedexpr.py index 3949bbea..6ebcf978 100644 --- a/libcst/_nodes/tests/test_namedexpr.py +++ b/libcst/_nodes/tests/test_namedexpr.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -22,7 +22,9 @@ def _parse_statement_force_38(code: str) -> cst.BaseCompoundStatement: code, config=cst.PartialParserConfig(python_version="3.8") ) if not isinstance(statement, cst.BaseCompoundStatement): - raise Exception("This function is expecting to parse compound statements only!") + raise ValueError( + "This function is expecting to parse compound statements only!" 
+ ) return statement @@ -166,6 +168,22 @@ class NamedExprTest(CSTNodeTest): "parser": _parse_expression_force_38, "expected_position": None, }, + { + "node": cst.ListComp( + elt=cst.NamedExpr( + cst.Name("_"), + cst.SimpleString("''"), + whitespace_after_walrus=cst.SimpleWhitespace(""), + whitespace_before_walrus=cst.SimpleWhitespace(""), + ), + for_in=cst.CompFor( + target=cst.Name("_"), + iter=cst.Name("_"), + whitespace_before=cst.SimpleWhitespace(""), + ), + ), + "code": "[_:=''for _ in _]", + }, ) ) def test_valid(self, **kwargs: Any) -> None: diff --git a/libcst/_nodes/tests/test_newline.py b/libcst/_nodes/tests/test_newline.py index ab5935bf..4b5e8d8b 100644 --- a/libcst/_nodes/tests/test_newline.py +++ b/libcst/_nodes/tests/test_newline.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_nonlocal.py b/libcst/_nodes/tests/test_nonlocal.py index 86b65301..62178e4a 100644 --- a/libcst/_nodes/tests/test_nonlocal.py +++ b/libcst/_nodes/tests/test_nonlocal.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_number.py b/libcst/_nodes/tests/test_number.py index 517a97c5..f1b53505 100644 --- a/libcst/_nodes/tests/test_number.py +++ b/libcst/_nodes/tests/test_number.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_nodes/tests/test_raise.py b/libcst/_nodes/tests/test_raise.py index 7113357e..4b2253fd 100644 --- a/libcst/_nodes/tests/test_raise.py +++ b/libcst/_nodes/tests/test_raise.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_removal_behavior.py b/libcst/_nodes/tests/test_removal_behavior.py index aeb9745f..709b26f5 100644 --- a/libcst/_nodes/tests/test_removal_behavior.py +++ b/libcst/_nodes/tests/test_removal_behavior.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,7 @@ from typing import Type, Union import libcst as cst -from libcst import RemovalSentinel, parse_module +from libcst import parse_module, RemovalSentinel from libcst._nodes.tests.base import CSTNodeTest from libcst._types import CSTNodeT from libcst._visitors import CSTTransformer @@ -95,7 +95,7 @@ class RemovalBehavior(CSTNodeTest): self, before: str, after: str, visitor: Type[CSTTransformer] ) -> None: if before.endswith("\n") or after.endswith("\n"): - raise Exception("Test cases should not be newline-terminated!") + raise ValueError("Test cases should not be newline-terminated!") # Test doesn't have newline termination case before_module = parse_module(before) diff --git a/libcst/_nodes/tests/test_return.py b/libcst/_nodes/tests/test_return.py index 75aa587a..85356e19 100644 --- a/libcst/_nodes/tests/test_return.py +++ b/libcst/_nodes/tests/test_return.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_set.py b/libcst/_nodes/tests/test_set.py index 434bf0ab..699b458a 100644 --- a/libcst/_nodes/tests/test_set.py +++ b/libcst/_nodes/tests/test_set.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -12,7 +12,6 @@ from libcst.testing.utils import data_provider class ListTest(CSTNodeTest): - # A lot of Element/StarredElement tests are provided by the tests for Tuple, so we # we don't need to duplicate them here. @data_provider( @@ -133,4 +132,6 @@ class ListTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/tests/test_simple_comp.py b/libcst/_nodes/tests/test_simple_comp.py index cf73176b..33ba4164 100644 --- a/libcst/_nodes/tests/test_simple_comp.py +++ b/libcst/_nodes/tests/test_simple_comp.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,7 +6,7 @@ from typing import Any, Callable import libcst as cst -from libcst import PartialParserConfig, parse_expression, parse_statement +from libcst import parse_expression, parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest from libcst.metadata import CodeRange from libcst.testing.utils import data_provider @@ -41,6 +41,33 @@ class SimpleCompTest(CSTNodeTest): "code": "{a for b in c}", "parser": parse_expression, }, + # non-trivial elt in GeneratorExp + { + "node": cst.GeneratorExp( + cst.BinaryOperation(cst.Name("a1"), cst.Add(), cst.Name("a2")), + cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), + ), + "code": "(a1 + a2 for b in c)", + "parser": parse_expression, + }, + # non-trivial elt in ListComp + { + "node": cst.ListComp( + cst.BinaryOperation(cst.Name("a1"), cst.Add(), cst.Name("a2")), + cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), + ), + "code": "[a1 + a2 for b in c]", + "parser": parse_expression, + }, + # non-trivial elt in SetComp + { + "node": cst.SetComp( + cst.BinaryOperation(cst.Name("a1"), cst.Add(), cst.Name("a2")), + cst.CompFor(target=cst.Name("b"), iter=cst.Name("c")), + ), + "code": "{a1 + a2 for b in c}", + "parser": parse_expression, + }, # async GeneratorExp { "node": cst.GeneratorExp( diff --git a/libcst/_nodes/tests/test_simple_statement.py b/libcst/_nodes/tests/test_simple_statement.py index 5870e56e..847d3e0f 100644 --- a/libcst/_nodes/tests/test_simple_statement.py +++ b/libcst/_nodes/tests/test_simple_statement.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_nodes/tests/test_simple_string.py b/libcst/_nodes/tests/test_simple_string.py new file mode 100644 index 00000000..d9abec50 --- /dev/null +++ b/libcst/_nodes/tests/test_simple_string.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import unittest + +import libcst as cst + + +class TestSimpleString(unittest.TestCase): + def test_quote(self) -> None: + test_cases = [ + ('"a"', '"'), + ("'b'", "'"), + ('""', '"'), + ("''", "'"), + ('"""c"""', '"""'), + ("'''d'''", "'''"), + ('""""e"""', '"""'), + ("''''f'''", "'''"), + ('"""""g"""', '"""'), + ("'''''h'''", "'''"), + ('""""""', '"""'), + ("''''''", "'''"), + ] + + for s, expected_quote in test_cases: + simple_string = cst.SimpleString(s) + actual = simple_string.quote + self.assertEqual(expected_quote, actual) diff --git a/libcst/_nodes/tests/test_simple_whitespace.py b/libcst/_nodes/tests/test_simple_whitespace.py index b571d7f2..fca4139c 100644 --- a/libcst/_nodes/tests/test_simple_whitespace.py +++ b/libcst/_nodes/tests/test_simple_whitespace.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_small_statement.py b/libcst/_nodes/tests/test_small_statement.py index 73e5296f..5defd891 100644 --- a/libcst/_nodes/tests/test_small_statement.py +++ b/libcst/_nodes/tests/test_small_statement.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_nodes/tests/test_subscript.py b/libcst/_nodes/tests/test_subscript.py index 7e39eb5e..44ccba29 100644 --- a/libcst/_nodes/tests/test_subscript.py +++ b/libcst/_nodes/tests/test_subscript.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_template_strings.py b/libcst/_nodes/tests/test_template_strings.py new file mode 100644 index 00000000..6e4c308a --- /dev/null +++ b/libcst/_nodes/tests/test_template_strings.py @@ -0,0 +1,183 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Callable, Optional + +import libcst as cst +from libcst import parse_expression +from libcst._nodes.tests.base import CSTNodeTest +from libcst.metadata import CodeRange +from libcst.testing.utils import data_provider + + +class TemplatedStringTest(CSTNodeTest): + @data_provider( + ( + # Simple t-string with only text + ( + cst.TemplatedString( + parts=(cst.TemplatedStringText("hello world"),), + ), + 't"hello world"', + True, + ), + # t-string with one expression + ( + cst.TemplatedString( + parts=( + cst.TemplatedStringText("hello "), + cst.TemplatedStringExpression( + expression=cst.Name("name"), + ), + ), + ), + 't"hello {name}"', + True, + ), + # t-string with multiple expressions + ( + cst.TemplatedString( + parts=( + cst.TemplatedStringText("a="), + cst.TemplatedStringExpression(expression=cst.Name("a")), + cst.TemplatedStringText(", b="), + cst.TemplatedStringExpression(expression=cst.Name("b")), + ), + ), + 't"a={a}, b={b}"', + True, + CodeRange((1, 0), (1, 15)), + ), + # t-string with nested expression + ( + cst.TemplatedString( + parts=( + cst.TemplatedStringText("sum="), + 
cst.TemplatedStringExpression( + expression=cst.BinaryOperation( + left=cst.Name("a"), + operator=cst.Add(), + right=cst.Name("b"), + ) + ), + ), + ), + 't"sum={a + b}"', + True, + ), + # t-string with spacing in expression + ( + cst.TemplatedString( + parts=( + cst.TemplatedStringText("x = "), + cst.TemplatedStringExpression( + whitespace_before_expression=cst.SimpleWhitespace(" "), + expression=cst.Name("x"), + whitespace_after_expression=cst.SimpleWhitespace(" "), + ), + ), + ), + 't"x = { x }"', + True, + ), + # t-string with escaped braces + ( + cst.TemplatedString( + parts=(cst.TemplatedStringText("{{foo}}"),), + ), + 't"{{foo}}"', + True, + ), + # t-string with only an expression + ( + cst.TemplatedString( + parts=( + cst.TemplatedStringExpression(expression=cst.Name("value")), + ), + ), + 't"{value}"', + True, + ), + # t-string with whitespace and newlines + ( + cst.TemplatedString( + parts=( + cst.TemplatedStringText("line1\\n"), + cst.TemplatedStringExpression(expression=cst.Name("x")), + cst.TemplatedStringText("\\nline2"), + ), + ), + 't"line1\\n{x}\\nline2"', + True, + ), + # t-string with parenthesis (not typical, but test node construction) + ( + cst.TemplatedString( + lpar=(cst.LeftParen(),), + parts=(cst.TemplatedStringText("foo"),), + rpar=(cst.RightParen(),), + ), + '(t"foo")', + True, + ), + # t-string with whitespace in delimiters + ( + cst.TemplatedString( + lpar=(cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")),), + parts=(cst.TemplatedStringText("foo"),), + rpar=(cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")),), + ), + '( t"foo" )', + True, + ), + # Test TemplatedStringText and TemplatedStringExpression individually + ( + cst.TemplatedStringText("abc"), + "abc", + False, + CodeRange((1, 0), (1, 3)), + ), + ( + cst.TemplatedStringExpression(expression=cst.Name("foo")), + "{foo}", + False, + CodeRange((1, 0), (1, 5)), + ), + ) + ) + def test_valid( + self, + node: cst.CSTNode, + code: str, + check_parsing: bool, + 
position: Optional[CodeRange] = None, + ) -> None: + if check_parsing: + self.validate_node(node, code, parse_expression, expected_position=position) + else: + self.validate_node(node, code, expected_position=position) + + @data_provider( + ( + ( + lambda: cst.TemplatedString( + parts=(cst.TemplatedStringText("foo"),), + lpar=(cst.LeftParen(),), + ), + "left paren without right paren", + ), + ( + lambda: cst.TemplatedString( + parts=(cst.TemplatedStringText("foo"),), + rpar=(cst.RightParen(),), + ), + "right paren without left paren", + ), + ) + ) + def test_invalid( + self, get_node: Callable[[], cst.CSTNode], expected_re: str + ) -> None: + self.assert_invalid(get_node, expected_re) diff --git a/libcst/_nodes/tests/test_trailing_whitespace.py b/libcst/_nodes/tests/test_trailing_whitespace.py index d11d553e..7b1b2587 100644 --- a/libcst/_nodes/tests/test_trailing_whitespace.py +++ b/libcst/_nodes/tests/test_trailing_whitespace.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_try.py b/libcst/_nodes/tests/test_try.py index df4a6ae4..c5ae2462 100644 --- a/libcst/_nodes/tests/test_try.py +++ b/libcst/_nodes/tests/test_try.py @@ -1,9 +1,9 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-from typing import Any +from typing import Any, Callable import libcst as cst from libcst import parse_statement @@ -11,6 +11,8 @@ from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock from libcst.metadata import CodeRange from libcst.testing.utils import data_provider +native_parse_statement: Callable[[str], cst.CSTNode] = parse_statement + class TryTest(CSTNodeTest): @data_provider( @@ -324,6 +326,52 @@ class TryTest(CSTNodeTest): "code": "try: pass\nexcept(IOError, ImportError): pass\n", "parser": parse_statement, }, + # No space before as + { + "node": cst.Try( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=[ + cst.ExceptHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_after_except=cst.SimpleWhitespace(" "), + type=cst.Call(cst.Name("foo")), + name=cst.AsName( + whitespace_before_as=cst.SimpleWhitespace(""), + name=cst.Name("bar"), + ), + ) + ], + ), + "code": "try: pass\nexcept foo()as bar: pass\n", + }, + # PEP758 - Multiple exceptions with no parentheses + { + "node": cst.Try( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=[ + cst.ExceptHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Tuple( + elements=[ + cst.Element( + value=cst.Name( + value="ValueError", + ), + ), + cst.Element( + value=cst.Name( + value="RuntimeError", + ), + ), + ], + lpar=[], + rpar=[], + ), + ) + ], + ), + "code": "try: pass\nexcept ValueError, RuntimeError: pass\n", + }, ) ) def test_valid(self, **kwargs: Any) -> None: @@ -341,12 +389,6 @@ class TryTest(CSTNodeTest): ), "expected_re": "between 'as'", }, - { - "get_node": lambda: cst.AsName( - cst.Name("bla"), whitespace_before_as=cst.SimpleWhitespace("") - ), - "expected_re": "before 'as'", - }, { "get_node": lambda: cst.ExceptHandler( cst.SimpleStatementSuite((cst.Pass(),)), @@ -407,3 +449,194 @@ class TryTest(CSTNodeTest): ) def test_invalid(self, **kwargs: Any) -> None: self.assert_invalid(**kwargs) + + +class TryStarTest(CSTNodeTest): + @data_provider( + ( + # 
Try/except with a class + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("Exception"), + ), + ), + ), + "code": "try: pass\nexcept* Exception: pass\n", + "parser": native_parse_statement, + }, + # Try/except with a named class + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("Exception"), + name=cst.AsName(cst.Name("exc")), + ), + ), + ), + "code": "try: pass\nexcept* Exception as exc: pass\n", + "parser": native_parse_statement, + "expected_position": CodeRange((1, 0), (2, 30)), + }, + # Try/except with multiple clauses + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("TypeError"), + name=cst.AsName(cst.Name("e")), + ), + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("KeyError"), + name=cst.AsName(cst.Name("e")), + ), + ), + ), + "code": "try: pass\n" + + "except* TypeError as e: pass\n" + + "except* KeyError as e: pass\n", + "parser": native_parse_statement, + "expected_position": CodeRange((1, 0), (3, 27)), + }, + # Simple try/except/finally block + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("KeyError"), + whitespace_after_except=cst.SimpleWhitespace(""), + ), + ), + finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), + ), + "code": "try: pass\nexcept* KeyError: pass\nfinally: pass\n", + "parser": native_parse_statement, + "expected_position": CodeRange((1, 0), (3, 13)), + }, + # Simple try/except/else block + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + 
cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("KeyError"), + whitespace_after_except=cst.SimpleWhitespace(""), + ), + ), + orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), + ), + "code": "try: pass\nexcept* KeyError: pass\nelse: pass\n", + "parser": native_parse_statement, + "expected_position": CodeRange((1, 0), (3, 10)), + }, + # Verify whitespace in various locations + { + "node": cst.TryStar( + leading_lines=(cst.EmptyLine(comment=cst.Comment("# 1")),), + body=cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + leading_lines=(cst.EmptyLine(comment=cst.Comment("# 2")),), + type=cst.Name("TypeError"), + name=cst.AsName( + cst.Name("e"), + whitespace_before_as=cst.SimpleWhitespace(" "), + whitespace_after_as=cst.SimpleWhitespace(" "), + ), + whitespace_after_except=cst.SimpleWhitespace(" "), + whitespace_after_star=cst.SimpleWhitespace(""), + whitespace_before_colon=cst.SimpleWhitespace(" "), + body=cst.SimpleStatementSuite((cst.Pass(),)), + ), + ), + orelse=cst.Else( + leading_lines=(cst.EmptyLine(comment=cst.Comment("# 3")),), + body=cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_before_colon=cst.SimpleWhitespace(" "), + ), + finalbody=cst.Finally( + leading_lines=(cst.EmptyLine(comment=cst.Comment("# 4")),), + body=cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_before_colon=cst.SimpleWhitespace(" "), + ), + whitespace_before_colon=cst.SimpleWhitespace(" "), + ), + "code": "# 1\ntry : pass\n# 2\nexcept *TypeError as e : pass\n# 3\nelse : pass\n# 4\nfinally : pass\n", + "parser": native_parse_statement, + "expected_position": CodeRange((2, 0), (8, 14)), + }, + # Now all together + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=( + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("TypeError"), + name=cst.AsName(cst.Name("e")), + ), + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Name("KeyError"), + 
name=cst.AsName(cst.Name("e")), + ), + ), + orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))), + finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))), + ), + "code": "try: pass\n" + + "except* TypeError as e: pass\n" + + "except* KeyError as e: pass\n" + + "else: pass\n" + + "finally: pass\n", + "parser": native_parse_statement, + "expected_position": CodeRange((1, 0), (5, 13)), + }, + # PEP758 - Multiple exceptions with no parentheses + { + "node": cst.TryStar( + cst.SimpleStatementSuite((cst.Pass(),)), + handlers=[ + cst.ExceptStarHandler( + cst.SimpleStatementSuite((cst.Pass(),)), + type=cst.Tuple( + elements=[ + cst.Element( + value=cst.Name( + value="ValueError", + ), + comma=cst.Comma( + whitespace_after=cst.SimpleWhitespace(" ") + ), + ), + cst.Element( + value=cst.Name( + value="RuntimeError", + ), + ), + ], + lpar=[], + rpar=[], + ), + ) + ], + ), + "code": "try: pass\nexcept* ValueError, RuntimeError: pass\n", + "parser": native_parse_statement, + }, + ) + ) + def test_valid(self, **kwargs: Any) -> None: + self.validate_node(**kwargs) diff --git a/libcst/_nodes/tests/test_tuple.py b/libcst/_nodes/tests/test_tuple.py index f3a49bed..aa3d68bb 100644 --- a/libcst/_nodes/tests/test_tuple.py +++ b/libcst/_nodes/tests/test_tuple.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -90,40 +90,46 @@ class TupleTest(CSTNodeTest): "parser": parse_expression, "expected_position": CodeRange((1, 1), (1, 11)), }, - # custom parenthesis on StarredElement + # top-level two-element tuple, with one being starred { - "node": cst.Tuple( - [ - cst.StarredElement( - cst.Name("abc"), - lpar=[cst.LeftParen()], - rpar=[cst.RightParen()], - comma=cst.Comma(), + "node": cst.SimpleStatementLine( + body=[ + cst.Expr( + value=cst.Tuple( + [ + cst.Element(cst.Name("one"), comma=cst.Comma()), + cst.StarredElement(cst.Name("two")), + ], + lpar=[], + rpar=[], + ) ) ] ), - "code": "((*abc),)", - "parser": parse_expression, - "expected_position": CodeRange((1, 1), (1, 8)), + "code": "one,*two\n", + "parser": parse_statement, }, - # custom whitespace on StarredElement + # top-level three-element tuple, start/end is starred { - "node": cst.Tuple( - [ - cst.Element(cst.Name("one"), comma=cst.Comma()), - cst.StarredElement( - cst.Name("two"), - whitespace_before_value=cst.SimpleWhitespace(" "), - lpar=[cst.LeftParen()], - rpar=[cst.RightParen()], - ), - ], - lpar=[], - rpar=[], # rpar can't own the trailing whitespace if it's not there + "node": cst.SimpleStatementLine( + body=[ + cst.Expr( + value=cst.Tuple( + [ + cst.StarredElement( + cst.Name("one"), comma=cst.Comma() + ), + cst.Element(cst.Name("two"), comma=cst.Comma()), + cst.StarredElement(cst.Name("three")), + ], + lpar=[], + rpar=[], + ) + ) + ] ), - "code": "one,(* two)", - "parser": parse_expression, - "expected_position": CodeRange((1, 0), (1, 12)), + "code": "*one,two,*three\n", + "parser": parse_statement, }, # missing spaces around tuple, okay with parenthesis { @@ -279,4 +285,6 @@ class TupleTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/tests/test_type_alias.py b/libcst/_nodes/tests/test_type_alias.py new file mode 
100644 index 00000000..865135c1 --- /dev/null +++ b/libcst/_nodes/tests/test_type_alias.py @@ -0,0 +1,252 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any + +import libcst as cst +from libcst import parse_statement +from libcst._nodes.tests.base import CSTNodeTest +from libcst.metadata import CodeRange +from libcst.testing.utils import data_provider + + +class TypeAliasCreationTest(CSTNodeTest): + @data_provider( + ( + { + "node": cst.TypeAlias( + cst.Name("foo"), + cst.Name("bar"), + ), + "code": "type foo = bar", + "expected_position": CodeRange((1, 0), (1, 14)), + }, + { + "node": cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [cst.TypeParam(cst.TypeVar(cst.Name("T")))] + ), + value=cst.BinaryOperation( + cst.Name("bar"), cst.BitOr(), cst.Name("baz") + ), + ), + "code": "type foo[T] = bar | baz", + "expected_position": CodeRange((1, 0), (1, 23)), + }, + { + "node": cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.TypeVar(cst.Name("T"), bound=cst.Name("str")) + ), + cst.TypeParam(cst.TypeVarTuple(cst.Name("Ts"))), + cst.TypeParam(cst.ParamSpec(cst.Name("KW"))), + ] + ), + value=cst.BinaryOperation( + cst.Name("bar"), cst.BitOr(), cst.Name("baz") + ), + ), + "code": "type foo[T: str, *Ts, **KW] = bar | baz", + "expected_position": CodeRange((1, 0), (1, 39)), + }, + { + "node": cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.TypeVar(cst.Name("T")), default=cst.Name("str") + ), + ] + ), + value=cst.Name("bar"), + ), + "code": "type foo[T = str] = bar", + "expected_position": CodeRange((1, 0), (1, 23)), + }, + { + "node": cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.ParamSpec(cst.Name("P")), + default=cst.List( + elements=[ + 
cst.Element(cst.Name("int")), + cst.Element(cst.Name("str")), + ] + ), + ), + ] + ), + value=cst.Name("bar"), + ), + "code": "type foo[**P = [int, str]] = bar", + "expected_position": CodeRange((1, 0), (1, 32)), + }, + { + "node": cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.TypeVarTuple(cst.Name("T")), + equal=cst.AssignEqual(), + default=cst.Name("default"), + star="*", + ), + ] + ), + value=cst.Name("bar"), + ), + "code": "type foo[*T = *default] = bar", + "expected_position": CodeRange((1, 0), (1, 29)), + }, + { + "node": cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.TypeVarTuple(cst.Name("T")), + equal=cst.AssignEqual(), + default=cst.Name("default"), + star="*", + whitespace_after_star=cst.SimpleWhitespace(" "), + ), + ] + ), + value=cst.Name("bar"), + ), + "code": "type foo[*T = * default] = bar", + "expected_position": CodeRange((1, 0), (1, 31)), + }, + ) + ) + def test_valid(self, **kwargs: Any) -> None: + self.validate_node(**kwargs) + + +class TypeAliasParserTest(CSTNodeTest): + @data_provider( + ( + { + "node": cst.SimpleStatementLine( + [ + cst.TypeAlias( + cst.Name("foo"), + cst.Name("bar"), + whitespace_after_name=cst.SimpleWhitespace(" "), + ) + ] + ), + "code": "type foo = bar\n", + "parser": parse_statement, + }, + { + "node": cst.SimpleStatementLine( + [ + cst.TypeAlias( + cst.Name("foo"), + cst.Name("bar"), + type_parameters=cst.TypeParameters( + params=[ + cst.TypeParam( + cst.TypeVar( + cst.Name("T"), cst.Name("str"), cst.Colon() + ), + cst.Comma(), + ), + cst.TypeParam( + cst.ParamSpec( + cst.Name("KW"), + whitespace_after_star=cst.SimpleWhitespace( + " " + ), + ), + cst.Comma( + whitespace_before=cst.SimpleWhitespace(" "), + whitespace_after=cst.SimpleWhitespace(" "), + ), + ), + ], + rbracket=cst.RightSquareBracket( + cst.SimpleWhitespace("") + ), + ), + whitespace_after_name=cst.SimpleWhitespace(" "), + 
whitespace_after_type=cst.SimpleWhitespace(" "), + whitespace_after_equals=cst.SimpleWhitespace(" "), + whitespace_after_type_parameters=cst.SimpleWhitespace(" "), + semicolon=cst.Semicolon( + whitespace_before=cst.SimpleWhitespace(" "), + whitespace_after=cst.SimpleWhitespace(" "), + ), + ) + ] + ), + "code": "type foo [T:str,** KW , ] = bar ; \n", + "parser": parse_statement, + }, + { + "node": cst.SimpleStatementLine( + [ + cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.TypeVarTuple(cst.Name("P")), + star="*", + equal=cst.AssignEqual(), + default=cst.Name("default"), + ), + ] + ), + value=cst.Name("bar"), + whitespace_after_name=cst.SimpleWhitespace(" "), + whitespace_after_type_parameters=cst.SimpleWhitespace(" "), + ) + ] + ), + "code": "type foo [*P = *default] = bar\n", + "parser": parse_statement, + }, + { + "node": cst.SimpleStatementLine( + [ + cst.TypeAlias( + cst.Name("foo"), + type_parameters=cst.TypeParameters( + [ + cst.TypeParam( + cst.TypeVarTuple(cst.Name("P")), + star="*", + whitespace_after_star=cst.SimpleWhitespace( + " " + ), + equal=cst.AssignEqual(), + default=cst.Name("default"), + ), + ] + ), + value=cst.Name("bar"), + whitespace_after_name=cst.SimpleWhitespace(" "), + whitespace_after_type_parameters=cst.SimpleWhitespace(" "), + ) + ] + ), + "code": "type foo [*P = * default] = bar\n", + "parser": parse_statement, + }, + ) + ) + def test_valid(self, **kwargs: Any) -> None: + self.validate_node(**kwargs) diff --git a/libcst/_nodes/tests/test_unary_op.py b/libcst/_nodes/tests/test_unary_op.py index c9dbb53b..28062d8b 100644 --- a/libcst/_nodes/tests/test_unary_op.py +++ b/libcst/_nodes/tests/test_unary_op.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_nodes/tests/test_while.py b/libcst/_nodes/tests/test_while.py index 1bdc8976..adf6e17e 100644 --- a/libcst/_nodes/tests/test_while.py +++ b/libcst/_nodes/tests/test_while.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_nodes/tests/test_with.py b/libcst/_nodes/tests/test_with.py index b74487c7..0b396619 100644 --- a/libcst/_nodes/tests/test_with.py +++ b/libcst/_nodes/tests/test_with.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,13 +6,15 @@ from typing import Any import libcst as cst -from libcst import PartialParserConfig, parse_statement +from libcst import parse_statement, PartialParserConfig from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock, parse_statement_as from libcst.metadata import CodeRange from libcst.testing.utils import data_provider class WithTest(CSTNodeTest): + maxDiff: int = 2000 + @data_provider( ( # Simple with block @@ -98,6 +100,23 @@ class WithTest(CSTNodeTest): "code": "with context_mgr() as ctx: pass\n", "parser": parse_statement, }, + { + "node": cst.With( + ( + cst.WithItem( + cst.Call(cst.Name("context_mgr")), + cst.AsName( + cst.Tuple(()), + whitespace_after_as=cst.SimpleWhitespace(""), + whitespace_before_as=cst.SimpleWhitespace(""), + ), + ), + ), + cst.SimpleStatementSuite((cst.Pass(),)), + ), + "code": "with context_mgr()as(): pass\n", + "parser": parse_statement, + }, # indentation { "node": DummyIndentedBlock( @@ -137,25 +156,6 @@ class WithTest(CSTNodeTest): "parser": parse_statement, "expected_position": CodeRange((2, 0), (2, 24)), }, - # Weird spacing rules - { - 
"node": cst.With( - ( - cst.WithItem( - cst.Call( - cst.Name("context_mgr"), - lpar=(cst.LeftParen(),), - rpar=(cst.RightParen(),), - ) - ), - ), - cst.SimpleStatementSuite((cst.Pass(),)), - whitespace_after_with=cst.SimpleWhitespace(""), - ), - "code": "with(context_mgr()): pass\n", - "parser": parse_statement, - "expected_position": CodeRange((1, 0), (1, 25)), - }, # Whitespace { "node": cst.With( @@ -177,6 +177,63 @@ class WithTest(CSTNodeTest): "parser": parse_statement, "expected_position": CodeRange((1, 0), (1, 36)), }, + # Weird spacing rules, that parse differently depending on whether + # we are using a grammar that included parenthesized with statements. + { + "node": cst.With( + ( + cst.WithItem( + cst.Call( + cst.Name("context_mgr"), + lpar=(), + rpar=(), + ) + ), + ), + cst.SimpleStatementSuite((cst.Pass(),)), + lpar=(cst.LeftParen()), + rpar=(cst.RightParen()), + whitespace_after_with=cst.SimpleWhitespace(""), + ), + "code": "with(context_mgr()): pass\n", + "parser": parse_statement, + "expected_position": CodeRange((1, 0), (1, 25)), + }, + # Multi-line parenthesized with. 
+ { + "node": cst.With( + ( + cst.WithItem( + cst.Call(cst.Name("foo")), + comma=cst.Comma( + whitespace_after=cst.ParenthesizedWhitespace( + first_line=cst.TrailingWhitespace( + whitespace=cst.SimpleWhitespace( + value="", + ), + comment=None, + newline=cst.Newline( + value=None, + ), + ), + empty_lines=[], + indent=True, + last_line=cst.SimpleWhitespace( + value=" ", + ), + ) + ), + ), + cst.WithItem(cst.Call(cst.Name("bar")), comma=cst.Comma()), + ), + cst.SimpleStatementSuite((cst.Pass(),)), + lpar=cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")), + rpar=cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")), + ), + "code": ("with ( foo(),\n" " bar(), ): pass\n"), # noqa + "parser": parse_statement, + "expected_position": CodeRange((1, 0), (2, 21)), + }, ) ) def test_valid(self, **kwargs: Any) -> None: @@ -200,7 +257,8 @@ class WithTest(CSTNodeTest): ), cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)), ), - "expected_re": "The last WithItem in a With cannot have a trailing comma", + "expected_re": "The last WithItem in an unparenthesized With cannot " + + "have a trailing comma.", }, { "get_node": lambda: cst.With( @@ -210,6 +268,26 @@ class WithTest(CSTNodeTest): ), "expected_re": "Must have at least one space after with keyword", }, + { + "get_node": lambda: cst.With( + (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), + cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_after_with=cst.SimpleWhitespace(""), + lpar=cst.LeftParen(), + ), + "expected_re": "Do not mix concrete LeftParen/RightParen with " + + "MaybeSentinel", + }, + { + "get_node": lambda: cst.With( + (cst.WithItem(cst.Call(cst.Name("context_mgr"))),), + cst.SimpleStatementSuite((cst.Pass(),)), + whitespace_after_with=cst.SimpleWhitespace(""), + rpar=cst.RightParen(), + ), + "expected_re": "Do not mix concrete LeftParen/RightParen with " + + "MaybeSentinel", + }, ) ) def test_invalid(self, **kwargs: Any) -> None: @@ -230,4 +308,26 @@ class WithTest(CSTNodeTest): ) ) 
def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) + + def test_adding_parens(self) -> None: + node = cst.With( + ( + cst.WithItem( + cst.Call(cst.Name("foo")), + comma=cst.Comma( + whitespace_after=cst.ParenthesizedWhitespace(), + ), + ), + cst.WithItem(cst.Call(cst.Name("bar")), comma=cst.Comma()), + ), + cst.SimpleStatementSuite((cst.Pass(),)), + lpar=cst.LeftParen(whitespace_after=cst.SimpleWhitespace(" ")), + rpar=cst.RightParen(whitespace_before=cst.SimpleWhitespace(" ")), + ) + module = cst.Module([]) + self.assertEqual( + module.code_for_node(node), ("with ( foo(),\n" "bar(), ): pass\n") # noqa + ) diff --git a/libcst/_nodes/tests/test_yield.py b/libcst/_nodes/tests/test_yield.py index 83263beb..e5085b4d 100644 --- a/libcst/_nodes/tests/test_yield.py +++ b/libcst/_nodes/tests/test_yield.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -240,4 +240,6 @@ class YieldParsingTest(CSTNodeTest): ) ) def test_versions(self, **kwargs: Any) -> None: + if not kwargs.get("expect_success", True): + self.skipTest("parse errors are disabled for native parser") self.assert_parses(**kwargs) diff --git a/libcst/_nodes/whitespace.py b/libcst/_nodes/whitespace.py index 22182ebe..b1332c13 100644 --- a/libcst/_nodes/whitespace.py +++ b/libcst/_nodes/whitespace.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -19,7 +19,6 @@ from libcst._nodes.internal import ( ) from libcst._visitors import CSTVisitorT - # SimpleWhitespace includes continuation characters, which must be followed immediately # by a newline. SimpleWhitespace does not include other kinds of newlines, because those # may have semantic significance. @@ -49,6 +48,8 @@ class BaseParenthesizableWhitespace(CSTNode, ABC): ``iftest``), it has some semantic value. """ + __slots__ = () + # TODO: Should we somehow differentiate places where we require non-zero whitespace # with a separate type? diff --git a/libcst/_parser/__init__.py b/libcst/_parser/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/__init__.py +++ b/libcst/_parser/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/_parsing_check.py b/libcst/_parser/_parsing_check.py new file mode 100644 index 00000000..03283c95 --- /dev/null +++ b/libcst/_parser/_parsing_check.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from typing import Iterable, Union + +from libcst._exceptions import EOFSentinel +from libcst._parser.parso.pgen2.generator import ReservedString +from libcst._parser.parso.python.token import PythonTokenTypes, TokenType +from libcst._parser.types.token import Token + +_EOF_STR: str = "end of file (EOF)" +_INDENT_STR: str = "an indent" +_DEDENT_STR: str = "a dedent" + + +def get_expected_str( + encountered: Union[Token, EOFSentinel], + expected: Union[Iterable[Union[TokenType, ReservedString]], EOFSentinel], +) -> str: + if ( + isinstance(encountered, EOFSentinel) + or encountered.type is PythonTokenTypes.ENDMARKER + ): + encountered_str = _EOF_STR + elif encountered.type is PythonTokenTypes.INDENT: + encountered_str = _INDENT_STR + elif encountered.type is PythonTokenTypes.DEDENT: + encountered_str = _DEDENT_STR + else: + encountered_str = repr(encountered.string) + + if isinstance(expected, EOFSentinel): + expected_names = [_EOF_STR] + else: + expected_names = sorted( + [ + repr(el.name) if isinstance(el, TokenType) else repr(el.value) + for el in expected + ] + ) + + if len(expected_names) > 10: + # There's too many possibilities, so it's probably not useful to list them. + # Instead, let's just abbreviate the message. + return f"Unexpectedly encountered {encountered_str}." + else: + if len(expected_names) == 1: + expected_str = expected_names[0] + else: + expected_str = f"{', '.join(expected_names[:-1])}, or {expected_names[-1]}" + return f"Encountered {encountered_str}, but expected {expected_str}." 
diff --git a/libcst/_parser/base_parser.py b/libcst/_parser/base_parser.py index dc7f75ee..d349bb14 100644 --- a/libcst/_parser/base_parser.py +++ b/libcst/_parser/base_parser.py @@ -26,17 +26,12 @@ from dataclasses import dataclass, field from typing import Generic, Iterable, List, Sequence, TypeVar, Union -from libcst._exceptions import ( - EOFSentinel, - ParserSyntaxError, - PartialParserSyntaxError, - get_expected_str, -) +from libcst._exceptions import EOFSentinel, ParserSyntaxError, PartialParserSyntaxError +from libcst._parser._parsing_check import get_expected_str from libcst._parser.parso.pgen2.generator import DFAState, Grammar, ReservedString from libcst._parser.parso.python.token import TokenType from libcst._parser.types.token import Token - _NodeT = TypeVar("_NodeT") _TokenTypeT = TypeVar("_TokenTypeT", bound=TokenType) _TokenT = TypeVar("_TokenT", bound=Token) @@ -104,7 +99,7 @@ class BaseParser(Generic[_TokenT, _TokenTypeT, _NodeT]): def parse(self) -> _NodeT: # Ensure that we don't re-use parsers. if self.__was_parse_called: - raise Exception("Each parser object may only be used to parse once.") + raise ValueError("Each parser object may only be used to parse once.") self.__was_parse_called = True for token in self.tokens: @@ -130,11 +125,9 @@ class BaseParser(Generic[_TokenT, _TokenTypeT, _NodeT]): def convert_nonterminal( self, nonterminal: str, children: Sequence[_NodeT] - ) -> _NodeT: - ... + ) -> _NodeT: ... - def convert_terminal(self, token: _TokenT) -> _NodeT: - ... + def convert_terminal(self, token: _TokenT) -> _NodeT: ... def _add_token(self, token: _TokenT) -> None: """ @@ -205,7 +198,6 @@ class BaseParser(Generic[_TokenT, _TokenTypeT, _NodeT]): ) # Logically, `plan` is always defined, but pyre can't reasonably determine that. - # pyre-fixme[18]: Global name `plan` is undefined. 
stack[-1].dfa = plan.next_dfa for push in plan.dfa_pushes: diff --git a/libcst/_parser/conversions/__init__.py b/libcst/_parser/conversions/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/conversions/__init__.py +++ b/libcst/_parser/conversions/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/conversions/expression.py b/libcst/_parser/conversions/expression.py index b7e5c189..79d7ad78 100644 --- a/libcst/_parser/conversions/expression.py +++ b/libcst/_parser/conversions/expression.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -12,7 +12,8 @@ from tokenize import ( Intnumber as INTNUMBER_RE, ) -from libcst._exceptions import PartialParserSyntaxError +from libcst import CSTLogicError +from libcst._exceptions import ParserSyntaxError, PartialParserSyntaxError from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.expression import ( Arg, @@ -121,7 +122,6 @@ from libcst._parser.types.partials import ( from libcst._parser.types.token import Token from libcst._parser.whitespace_parser import parse_parenthesizable_whitespace - BINOP_TOKEN_LUT: typing.Dict[str, typing.Type[BaseBinaryOp]] = { "*": Multiply, "@": MatrixMultiply, @@ -328,7 +328,12 @@ def convert_boolop( # Convert all of the operations that have no precedence in a loop for op, rightexpr in grouper(rightexprs, 2): if op.string not in BOOLOP_TOKEN_LUT: - raise Exception(f"Unexpected token '{op.string}'!") + raise ParserSyntaxError( + f"Unexpected token '{op.string}'!", + lines=config.lines, + raw_line=0, + raw_column=0, + ) leftexpr = 
BooleanOperation( left=leftexpr, # pyre-ignore Pyre thinks that the type of the LUT is CSTNode. @@ -421,7 +426,12 @@ def convert_comp_op( ) else: # this should be unreachable - raise Exception(f"Unexpected token '{op.string}'!") + raise ParserSyntaxError( + f"Unexpected token '{op.string}'!", + lines=config.lines, + raw_line=0, + raw_column=0, + ) else: # A two-token comparison leftcomp, rightcomp = children @@ -452,7 +462,12 @@ def convert_comp_op( ) else: # this should be unreachable - raise Exception(f"Unexpected token '{leftcomp.string} {rightcomp.string}'!") + raise ParserSyntaxError( + f"Unexpected token '{leftcomp.string} {rightcomp.string}'!", + lines=config.lines, + raw_line=0, + raw_column=0, + ) @with_production("star_expr", "'*' expr") @@ -494,7 +509,12 @@ def convert_binop( # Convert all of the operations that have no precedence in a loop for op, rightexpr in grouper(rightexprs, 2): if op.string not in BINOP_TOKEN_LUT: - raise Exception(f"Unexpected token '{op.string}'!") + raise ParserSyntaxError( + f"Unexpected token '{op.string}'!", + lines=config.lines, + raw_line=0, + raw_column=0, + ) leftexpr = BinaryOperation( left=leftexpr, # pyre-ignore Pyre thinks that the type of the LUT is CSTNode. 
@@ -541,7 +561,12 @@ def convert_factor( ) ) else: - raise Exception(f"Unexpected token '{op.string}'!") + raise ParserSyntaxError( + f"Unexpected token '{op.string}'!", + lines=config.lines, + raw_line=0, + raw_column=0, + ) return WithLeadingWhitespace( UnaryOperation(operator=opnode, expression=factor.value), op.whitespace_before @@ -582,8 +607,7 @@ def convert_atom_expr( return child -@with_production("atom_expr_await", "'await' atom_expr_trailer", version=">=3.7") -@with_production("atom_expr_await", "AWAIT atom_expr_trailer", version="<=3.6") +@with_production("atom_expr_await", "AWAIT atom_expr_trailer") def convert_atom_expr_await( config: ParserConfig, children: typing.Sequence[typing.Any] ) -> typing.Any: @@ -618,6 +642,8 @@ def convert_atom_expr_trailer( config, trailer.whitespace_before ), lbracket=trailer.lbracket, + # pyre-fixme[6]: Expected `Sequence[SubscriptElement]` for 4th param + # but got `Union[typing.Sequence[SubscriptElement], Index, Slice]`. slice=trailer.slice, rbracket=trailer.rbracket, ) @@ -645,11 +671,13 @@ def convert_atom_expr_trailer( config, trailer.lpar.whitespace_before ), whitespace_before_args=trailer.lpar.value.whitespace_after, + # pyre-fixme[6]: Expected `Sequence[Arg]` for 4th param but got + # `Tuple[object, ...]`. args=tuple(args), ) else: # This is an invalid trailer, so lets give up - raise Exception("Logic error!") + raise CSTLogicError() return WithLeadingWhitespace(atom, whitespace_before) @@ -780,13 +808,10 @@ def convert_subscript( first_colon=Colon( whitespace_before=parse_parenthesizable_whitespace( config, - # pyre-fixme[16]: Optional type has no attribute - # `whitespace_before`. first_colon.whitespace_before, ), whitespace_after=parse_parenthesizable_whitespace( config, - # pyre-fixme[16]: Optional type has no attribute `whitespace_after`. 
first_colon.whitespace_after, ), ), @@ -871,9 +896,19 @@ def convert_atom_basic( Imaginary(child.string), child.whitespace_before ) else: - raise Exception("Unparseable number {child.string}") + raise ParserSyntaxError( + f"Unparseable number {child.string}", + lines=config.lines, + raw_line=0, + raw_column=0, + ) else: - raise Exception(f"Logic error, unexpected token {child.type.name}") + raise ParserSyntaxError( + f"Logic error, unexpected token {child.type.name}", + lines=config.lines, + raw_line=0, + raw_column=0, + ) @with_production("atom_squarebrackets", "'[' [testlist_comp_list] ']'") @@ -950,7 +985,10 @@ def convert_atom_parens( inner_atom = atoms[0].value return WithLeadingWhitespace( inner_atom.with_changes( - lpar=(lpar, *inner_atom.lpar), rpar=(*inner_atom.rpar, rpar) + # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. + lpar=(lpar, *inner_atom.lpar), + # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. + rpar=(*inner_atom.rpar, rpar), ), lpar_tok.whitespace_before, ) @@ -1038,13 +1076,13 @@ def convert_fstring_equality( @with_production( "fstring_expr", - "'{' testlist_comp_tuple [ fstring_equality ] [ fstring_conversion ] [ fstring_format_spec ] '}'", + "'{' (testlist_comp_tuple | yield_expr) [ fstring_equality ] [ fstring_conversion ] [ fstring_format_spec ] '}'", version=">=3.8", ) @with_production( "fstring_expr", - "'{' testlist_comp_tuple [ fstring_conversion ] [ fstring_format_spec ] '}'", - version="<=3.7", + "'{' (testlist_comp_tuple | yield_expr) [ fstring_conversion ] [ fstring_format_spec ] '}'", + version="<3.8", ) def convert_fstring_expr( config: ParserConfig, children: typing.Sequence[typing.Any] @@ -1239,7 +1277,6 @@ def _convert_sequencelike( # lpar/rpar are the responsibility of our parent return WithLeadingWhitespace( - # pyre-ignore[29]: `Union[Type[List], Type[Set], Type[Tuple]]` is not a function. 
sequence_type(elements, lpar=(), rpar=()), children[0].whitespace_before, ) @@ -1446,7 +1483,7 @@ def convert_arg_assign_comp_for( if equal.string == ":=": val = convert_namedexpr_test(config, children) if not isinstance(val, WithLeadingWhitespace): - raise Exception( + raise TypeError( f"convert_namedexpr_test returned {val!r}, not WithLeadingWhitespace" ) return Arg(value=val.value) @@ -1510,8 +1547,7 @@ def convert_sync_comp_for( ) -@with_production("comp_for", "['async'] sync_comp_for", version=">=3.7") -@with_production("comp_for", "[ASYNC] sync_comp_for", version="==3.6") +@with_production("comp_for", "[ASYNC] sync_comp_for", version=">=3.6") @with_production("comp_for", "sync_comp_for", version="<=3.5") def convert_comp_for( config: ParserConfig, children: typing.Sequence[typing.Any] diff --git a/libcst/_parser/conversions/module.py b/libcst/_parser/conversions/module.py index ad3bed66..b40641d0 100644 --- a/libcst/_parser/conversions/module.py +++ b/libcst/_parser/conversions/module.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/conversions/params.py b/libcst/_parser/conversions/params.py index 597e7ac9..5b29f95d 100644 --- a/libcst/_parser/conversions/params.py +++ b/libcst/_parser/conversions/params.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,6 +6,7 @@ from typing import Any, List, Optional, Sequence, Union +from libcst import CSTLogicError from libcst._exceptions import PartialParserSyntaxError from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.expression import ( @@ -121,7 +122,7 @@ def convert_argslist( # noqa: C901 # Example code: # def fn(*abc, *): ... # This should be unreachable, the grammar already disallows it. - raise Exception( + raise ValueError( "Cannot have multiple star ('*') markers in a single argument " + "list." ) @@ -136,11 +137,10 @@ def convert_argslist( # noqa: C901 # Example code: # def fn(foo, /, *, /, bar): ... # This should be unreachable, the grammar already disallows it. - raise Exception( + raise ValueError( "Cannot have multiple slash ('/') markers in a single argument " + "list." ) - # pyre-ignore Pyre seems to think param.star.__eq__ is not callable elif isinstance(param.star, str) and param.star == "" and param.default is None: # Can only add this if we're in the params or kwonly_params section if current_param is params and not seen_default: @@ -156,7 +156,6 @@ def convert_argslist( # noqa: C901 ) elif ( isinstance(param.star, str) - # pyre-ignore Pyre seems to think param.star.__eq__ is not callable and param.star == "" and param.default is not None ): @@ -170,12 +169,9 @@ def convert_argslist( # noqa: C901 # Example code: # def fn(**kwargs, trailing=None) # This should be unreachable, the grammar already disallows it. - raise Exception("Cannot have any arguments after a kwargs expansion.") + raise ValueError("Cannot have any arguments after a kwargs expansion.") elif ( - isinstance(param.star, str) - # pyre-ignore Pyre seems to think param.star.__eq__ is not callable - and param.star == "*" - and param.default is None + isinstance(param.star, str) and param.star == "*" and param.default is None ): # Can only add this if we're in params, since we only allow one of # "*" or "*param". 
@@ -186,15 +182,12 @@ def convert_argslist( # noqa: C901 # Example code: # def fn(*first, *second): ... # This should be unreachable, the grammar already disallows it. - raise Exception( + raise ValueError( "Expected a keyword argument but found a starred positional " + "argument expansion." ) elif ( - isinstance(param.star, str) - # pyre-ignore Pyre seems to think param.star.__eq__ is not callable - and param.star == "**" - and param.default is None + isinstance(param.star, str) and param.star == "**" and param.default is None ): # Can add this in all cases where we don't have a star_kwarg # yet. @@ -205,13 +198,13 @@ def convert_argslist( # noqa: C901 # Example code: # def fn(**first, **second) # This should be unreachable, the grammar already disallows it. - raise Exception( + raise ValueError( "Multiple starred keyword argument expansions are not allowed in a " + "single argument list" ) else: # The state machine should never end up here. - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") return current_param diff --git a/libcst/_parser/conversions/statement.py b/libcst/_parser/conversions/statement.py index 8ff7ac8f..f96c6ea2 100644 --- a/libcst/_parser/conversions/statement.py +++ b/libcst/_parser/conversions/statement.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,7 +6,8 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Type -from libcst._exceptions import PartialParserSyntaxError +from libcst import CSTLogicError +from libcst._exceptions import ParserSyntaxError, PartialParserSyntaxError from libcst._maybe_sentinel import MaybeSentinel from libcst._nodes.expression import ( Annotation, @@ -101,7 +102,6 @@ from libcst._parser.whitespace_parser import ( parse_simple_whitespace, ) - AUGOP_TOKEN_LUT: Dict[str, Type[BaseAugOp]] = { "+=": AddAssign, "-=": SubtractAssign, @@ -284,7 +284,9 @@ def convert_annassign(config: ParserConfig, children: Sequence[Any]) -> Any: whitespace_after=parse_simple_whitespace(config, equal.whitespace_after), ) else: - raise Exception("Invalid parser state!") + raise ParserSyntaxError( + "Invalid parser state!", lines=config.lines, raw_line=0, raw_column=0 + ) return AnnAssignPartial( annotation=Annotation( @@ -320,7 +322,13 @@ def convert_annassign(config: ParserConfig, children: Sequence[Any]) -> Any: def convert_augassign(config: ParserConfig, children: Sequence[Any]) -> Any: op, expr = children if op.string not in AUGOP_TOKEN_LUT: - raise Exception(f"Unexpected token '{op.string}'!") + raise ParserSyntaxError( + f"Unexpected token '{op.string}'!", + lines=config.lines, + raw_line=0, + raw_column=0, + ) + return AugAssignPartial( # pyre-ignore Pyre seems to think that the value of this LUT is CSTNode operator=AUGOP_TOKEN_LUT[op.string]( @@ -448,7 +456,7 @@ def convert_import_relative(config: ParserConfig, children: Sequence[Any]) -> An # This should be the dotted name, and we can't get more than # one, but lets be sure anyway if dotted_name is not None: - raise Exception("Logic error!") + raise CSTLogicError() dotted_name = child return ImportRelativePartial(relative=tuple(dots), module=dotted_name) @@ -645,7 +653,7 @@ def convert_raise_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: item=source.value, ) else: - raise Exception("Logic error!") + raise 
CSTLogicError() return WithLeadingWhitespace( Raise(whitespace_after_raise=whitespace_after_raise, exc=exc, cause=cause), @@ -894,7 +902,7 @@ def convert_try_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: if isinstance(clause, Token): if clause.string == "else": if orelse is not None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") orelse = Else( leading_lines=parse_empty_lines(config, clause.whitespace_before), whitespace_before_colon=parse_simple_whitespace( @@ -904,7 +912,7 @@ def convert_try_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: ) elif clause.string == "finally": if finalbody is not None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") finalbody = Finally( leading_lines=parse_empty_lines(config, clause.whitespace_before), whitespace_before_colon=parse_simple_whitespace( @@ -913,7 +921,7 @@ def convert_try_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: body=suite, ) else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") elif isinstance(clause, ExceptClausePartial): handlers.append( ExceptHandler( @@ -928,7 +936,7 @@ def convert_try_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: ) ) else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") return Try( leading_lines=parse_empty_lines(config, trytoken.whitespace_before), @@ -1063,8 +1071,7 @@ def _extract_async( return (parse_empty_lines(config, whitespace_before), asyncnode, stmt.value) -@with_production("asyncable_funcdef", "['async'] funcdef", version=">=3.7") -@with_production("asyncable_funcdef", "[ASYNC] funcdef", version=">=3.5,<3.7") +@with_production("asyncable_funcdef", "[ASYNC] funcdef", version=">=3.5") @with_production("asyncable_funcdef", "funcdef", version="<3.5") def convert_asyncable_funcdef(config: ParserConfig, children: Sequence[Any]) -> Any: leading_lines, asyncnode, funcdef = _extract_async(config, children) @@ -1302,6 +1309,10 @@ def 
convert_decorated(config: ParserConfig, children: Sequence[Any]) -> Any: # Now, modify the original function or class to add the decorators. return class_or_func.with_changes( leading_lines=leading_lines, + # pyre-fixme[60]: Concatenation not yet support for multiple variadic + # tuples: `*class_or_func.leading_lines, + # *class_or_func.lines_after_decorators`. + # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. lines_after_decorators=( *class_or_func.leading_lines, *class_or_func.lines_after_decorators, @@ -1311,10 +1322,7 @@ def convert_decorated(config: ParserConfig, children: Sequence[Any]) -> Any: @with_production( - "asyncable_stmt", "['async'] (funcdef | with_stmt | for_stmt)", version=">=3.7" -) -@with_production( - "asyncable_stmt", "[ASYNC] (funcdef | with_stmt | for_stmt)", version=">=3.5,<3.7" + "asyncable_stmt", "[ASYNC] (funcdef | with_stmt | for_stmt)", version=">=3.5" ) @with_production("asyncable_stmt", "funcdef | with_stmt | for_stmt", version="<3.5") def convert_asyncable_stmt(config: ParserConfig, children: Sequence[Any]) -> Any: @@ -1334,7 +1342,7 @@ def convert_asyncable_stmt(config: ParserConfig, children: Sequence[Any]) -> Any asynchronous=asyncnode, leading_lines=leading_lines ) else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") @with_production("suite", "simple_stmt_suite | indented_suite") diff --git a/libcst/_parser/conversions/terminals.py b/libcst/_parser/conversions/terminals.py index 96d9391b..f5697229 100644 --- a/libcst/_parser/conversions/terminals.py +++ b/libcst/_parser/conversions/terminals.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_parser/custom_itertools.py b/libcst/_parser/custom_itertools.py index ccbb1a1f..81cfdb4b 100644 --- a/libcst/_parser/custom_itertools.py +++ b/libcst/_parser/custom_itertools.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,6 @@ from itertools import zip_longest from typing import Iterable, Iterator, TypeVar - _T = TypeVar("_T") diff --git a/libcst/_parser/detect_config.py b/libcst/_parser/detect_config.py index 0209d0a2..375a4f07 100644 --- a/libcst/_parser/detect_config.py +++ b/libcst/_parser/detect_config.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,7 +9,7 @@ import re from dataclasses import dataclass from io import BytesIO from tokenize import detect_encoding as py_tokenize_detect_encoding -from typing import FrozenSet, Iterable, Iterator, Pattern, Set, Union +from typing import FrozenSet, Iterable, Iterator, Pattern, Set, Tuple, Union from libcst._nodes.whitespace import NEWLINE_RE from libcst._parser.parso.python.token import PythonTokenTypes, TokenType @@ -18,7 +18,6 @@ from libcst._parser.types.config import AutoConfig, ParserConfig, PartialParserC from libcst._parser.types.token import Token from libcst._parser.wrapped_tokenize import tokenize_lines - _INDENT: TokenType = PythonTokenTypes.INDENT _NAME: TokenType = PythonTokenTypes.NAME _NEWLINE: TokenType = PythonTokenTypes.NEWLINE @@ -115,6 +114,23 @@ def _detect_future_imports(tokens: Iterable[Token]) -> FrozenSet[str]: return frozenset(future_imports) +def convert_to_utf8( + source: Union[str, bytes], *, partial: PartialParserConfig +) -> Tuple[str, str]: + """ + 
Returns an (original encoding, converted source) tuple. + """ + partial_encoding = partial.encoding + encoding = ( + _detect_encoding(source) + if isinstance(partial_encoding, AutoConfig) + else partial_encoding + ) + + source_str = source if isinstance(source, str) else source.decode(encoding) + return (encoding, source_str) + + def detect_config( source: Union[str, bytes], *, @@ -129,14 +145,7 @@ def detect_config( python_version = partial.parsed_python_version - partial_encoding = partial.encoding - encoding = ( - _detect_encoding(source) - if isinstance(partial_encoding, AutoConfig) - else partial_encoding - ) - - source_str = source if isinstance(source, str) else source.decode(encoding) + encoding, source_str = convert_to_utf8(source, partial=partial) partial_default_newline = partial.default_newline default_newline = ( @@ -163,7 +172,7 @@ def detect_config( lines = split_lines(source_str, keepends=True) - tokens = tokenize_lines(lines, python_version) + tokens = tokenize_lines(source_str, lines, python_version) partial_default_indent = partial.default_indent if isinstance(partial_default_indent, AutoConfig): diff --git a/libcst/_parser/entrypoints.py b/libcst/_parser/entrypoints.py index 1a27e976..bab45ece 100644 --- a/libcst/_parser/entrypoints.py +++ b/libcst/_parser/entrypoints.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,18 +9,16 @@ parser. 
A parser entrypoint should take the source code and some configuration information """ +from functools import partial from typing import Union from libcst._nodes.base import CSTNode from libcst._nodes.expression import BaseExpression from libcst._nodes.module import Module from libcst._nodes.statement import BaseCompoundStatement, SimpleStatementLine -from libcst._parser.detect_config import detect_config -from libcst._parser.grammar import get_grammar, validate_grammar -from libcst._parser.python_parser import PythonCSTParser +from libcst._parser.detect_config import convert_to_utf8 from libcst._parser.types.config import PartialParserConfig - _DEFAULT_PARTIAL_PARSER_CONFIG: PartialParserConfig = PartialParserConfig() @@ -32,25 +30,21 @@ def _parse( detect_trailing_newline: bool, detect_default_newline: bool, ) -> CSTNode: - detection_result = detect_config( - source, - partial=config, - detect_trailing_newline=detect_trailing_newline, - detect_default_newline=detect_default_newline, - ) - validate_grammar() - grammar = get_grammar(config.parsed_python_version, config.future_imports) - parser = PythonCSTParser( - tokens=detection_result.tokens, - config=detection_result.config, - pgen_grammar=grammar, - start_nonterminal=entrypoint, - ) - # The parser has an Any return type, we can at least refine it to CSTNode here. 
- result = parser.parse() - assert isinstance(result, CSTNode) - return result + encoding, source_str = convert_to_utf8(source, partial=config) + + from libcst import native + + if entrypoint == "file_input": + parse = partial(native.parse_module, encoding=encoding) + elif entrypoint == "stmt_input": + parse = native.parse_statement + elif entrypoint == "expression_input": + parse = native.parse_expression + else: + raise ValueError(f"Unknown parser entry point: {entrypoint}") + + return parse(source_str) def parse_module( diff --git a/libcst/_parser/grammar.py b/libcst/_parser/grammar.py index c8c24025..ee65ef72 100644 --- a/libcst/_parser/grammar.py +++ b/libcst/_parser/grammar.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -133,15 +133,14 @@ from libcst._parser.conversions.terminals import ( convert_OP, convert_STRING, ) -from libcst._parser.parso.pgen2.generator import Grammar, generate_grammar +from libcst._parser.parso.pgen2.generator import generate_grammar, Grammar from libcst._parser.parso.python.token import PythonTokenTypes, TokenType -from libcst._parser.parso.utils import PythonVersionInfo, parse_version_string +from libcst._parser.parso.utils import parse_version_string, PythonVersionInfo from libcst._parser.production_decorator import get_productions from libcst._parser.types.config import AutoConfig from libcst._parser.types.conversions import NonterminalConversion, TerminalConversion from libcst._parser.types.production import Production - # Keep this sorted alphabetically _TERMINAL_CONVERSIONS_SEQUENCE: Tuple[TerminalConversion, ...] 
= ( convert_DEDENT, @@ -320,7 +319,7 @@ def validate_grammar() -> None: production_name = fn_productions[0].name expected_name = f"convert_{production_name}" if fn.__name__ != expected_name: - raise Exception( + raise ValueError( f"The conversion function for '{production_name}' " + f"must be called '{expected_name}', not '{fn.__name__}'." ) @@ -331,7 +330,7 @@ def _get_version_comparison(version: str) -> Tuple[str, PythonVersionInfo]: return (version[:2], parse_version_string(version[2:].strip())) if version[:1] in (">", "<"): return (version[:1], parse_version_string(version[1:].strip())) - raise Exception(f"Invalid version comparison specifier '{version}'") + raise ValueError(f"Invalid version comparison specifier '{version}'") def _compare_versions( @@ -351,7 +350,7 @@ def _compare_versions( return actual_version > requested_version if comparison == "<": return actual_version < requested_version - raise Exception(f"Invalid version comparison specifier '{comparison}'") + raise ValueError(f"Invalid version comparison specifier '{comparison}'") def _should_include( @@ -406,7 +405,7 @@ def get_nonterminal_conversions( if not _should_include_future(fn_production.future, future_imports): continue if fn_production.name in conversions: - raise Exception( + raise ValueError( f"Found duplicate '{fn_production.name}' production in grammar" ) conversions[fn_production.name] = fn diff --git a/libcst/_parser/parso/__init__.py b/libcst/_parser/parso/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/parso/__init__.py +++ b/libcst/_parser/parso/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_parser/parso/pgen2/__init__.py b/libcst/_parser/parso/pgen2/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/parso/pgen2/__init__.py +++ b/libcst/_parser/parso/pgen2/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/parso/pgen2/generator.py b/libcst/_parser/parso/pgen2/generator.py index 546cc85f..5e83741b 100644 --- a/libcst/_parser/parso/pgen2/generator.py +++ b/libcst/_parser/parso/pgen2/generator.py @@ -39,7 +39,6 @@ from typing import Any, Generic, Mapping, Sequence, Set, TypeVar, Union from libcst._parser.parso.pgen2.grammar_parser import GrammarParser, NFAState - _TokenTypeT = TypeVar("_TokenTypeT") @@ -73,9 +72,9 @@ class DFAState(Generic[_TokenTypeT]): def __init__(self, from_rule: str, nfa_set: Set[NFAState], final: NFAState) -> None: self.from_rule = from_rule self.nfa_set = nfa_set - self.arcs: Mapping[ - str, DFAState - ] = {} # map from terminals/nonterminals to DFAState + self.arcs: Mapping[str, DFAState] = ( + {} + ) # map from terminals/nonterminals to DFAState # In an intermediary step we set these nonterminal arcs (which has the # same structure as arcs). These don't contain terminals anymore. 
self.nonterminal_arcs: Mapping[str, DFAState] = {} @@ -260,7 +259,7 @@ def generate_grammar(bnf_grammar: str, token_namespace: Any) -> Grammar[Any]: _calculate_tree_traversal(rule_to_dfas) if start_nonterminal is None: - raise Exception("could not find starting nonterminal!") + raise ValueError("could not find starting nonterminal!") return Grammar(start_nonterminal, rule_to_dfas, reserved_strings) diff --git a/libcst/_parser/parso/pgen2/grammar_parser.py b/libcst/_parser/parso/pgen2/grammar_parser.py index 5d0f2229..0d30199d 100644 --- a/libcst/_parser/parso/pgen2/grammar_parser.py +++ b/libcst/_parser/parso/pgen2/grammar_parser.py @@ -93,14 +93,10 @@ class GrammarParser: def _parse_items(self): # items: item+ a, b = self._parse_item() - while ( - self.type - in ( - PythonTokenTypes.NAME, - PythonTokenTypes.STRING, - ) - or self.value in ("(", "[") - ): + while self.type in ( + PythonTokenTypes.NAME, + PythonTokenTypes.STRING, + ) or self.value in ("(", "["): c, d = self._parse_item() # Need to end on the next item. b.add_arc(c) diff --git a/libcst/_parser/parso/python/__init__.py b/libcst/_parser/parso/python/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/parso/python/__init__.py +++ b/libcst/_parser/parso/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/parso/python/py_token.py b/libcst/_parser/parso/python/py_token.py new file mode 100644 index 00000000..204ce94d --- /dev/null +++ b/libcst/_parser/parso/python/py_token.py @@ -0,0 +1,48 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. +# +# Modifications: +# Copyright David Halter and Contributors +# Modifications are dual-licensed: MIT and PSF. 
+# 99% of the code is different from pgen2, now. +# +# A fork of `parso.python.token`. +# https://github.com/davidhalter/parso/blob/master/parso/python/token.py +# +# The following changes were made: +# - Explicit TokenType references instead of dynamic creation. +# - Use dataclasses instead of raw classes. +# pyre-unsafe + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class TokenType: + name: str + contains_syntax: bool = False + + def __repr__(self) -> str: + return "%s(%s)" % (self.__class__.__name__, self.name) + + +class PythonTokenTypes: + """ + Basically an enum, but Python 2 doesn't have enums in the standard library. + """ + + STRING: TokenType = TokenType("STRING") + NUMBER: TokenType = TokenType("NUMBER") + NAME: TokenType = TokenType("NAME", contains_syntax=True) + ERRORTOKEN: TokenType = TokenType("ERRORTOKEN") + NEWLINE: TokenType = TokenType("NEWLINE") + INDENT: TokenType = TokenType("INDENT") + DEDENT: TokenType = TokenType("DEDENT") + ERROR_DEDENT: TokenType = TokenType("ERROR_DEDENT") + ASYNC: TokenType = TokenType("ASYNC") + AWAIT: TokenType = TokenType("AWAIT") + FSTRING_STRING: TokenType = TokenType("FSTRING_STRING") + FSTRING_START: TokenType = TokenType("FSTRING_START") + FSTRING_END: TokenType = TokenType("FSTRING_END") + OP: TokenType = TokenType("OP", contains_syntax=True) + ENDMARKER: TokenType = TokenType("ENDMARKER") diff --git a/libcst/_parser/parso/python/token.py b/libcst/_parser/parso/python/token.py index 204ce94d..164262b9 100644 --- a/libcst/_parser/parso/python/token.py +++ b/libcst/_parser/parso/python/token.py @@ -1,48 +1,33 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. +# Copyright (c) Meta Platforms, Inc. and affiliates. # -# Modifications: -# Copyright David Halter and Contributors -# Modifications are dual-licensed: MIT and PSF. -# 99% of the code is different from pgen2, now. -# -# A fork of `parso.python.token`. 
-# https://github.com/davidhalter/parso/blob/master/parso/python/token.py -# -# The following changes were made: -# - Explicit TokenType references instead of dynamic creation. -# - Use dataclasses instead of raw classes. -# pyre-unsafe +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. -from dataclasses import dataclass +try: + from libcst_native import token_type as native_token_type + TokenType = native_token_type.TokenType -@dataclass(frozen=True) -class TokenType: - name: str - contains_syntax: bool = False + class PythonTokenTypes: + STRING: TokenType = native_token_type.STRING + NUMBER: TokenType = native_token_type.NUMBER + NAME: TokenType = native_token_type.NAME + NEWLINE: TokenType = native_token_type.NEWLINE + INDENT: TokenType = native_token_type.INDENT + DEDENT: TokenType = native_token_type.DEDENT + ASYNC: TokenType = native_token_type.ASYNC + AWAIT: TokenType = native_token_type.AWAIT + FSTRING_STRING: TokenType = native_token_type.FSTRING_STRING + FSTRING_START: TokenType = native_token_type.FSTRING_START + FSTRING_END: TokenType = native_token_type.FSTRING_END + OP: TokenType = native_token_type.OP + ENDMARKER: TokenType = native_token_type.ENDMARKER + # unused dummy tokens for backwards compat with the parso tokenizer + ERRORTOKEN: TokenType = native_token_type.ERRORTOKEN + ERROR_DEDENT: TokenType = native_token_type.ERROR_DEDENT - def __repr__(self) -> str: - return "%s(%s)" % (self.__class__.__name__, self.name) - - -class PythonTokenTypes: - """ - Basically an enum, but Python 2 doesn't have enums in the standard library. 
- """ - - STRING: TokenType = TokenType("STRING") - NUMBER: TokenType = TokenType("NUMBER") - NAME: TokenType = TokenType("NAME", contains_syntax=True) - ERRORTOKEN: TokenType = TokenType("ERRORTOKEN") - NEWLINE: TokenType = TokenType("NEWLINE") - INDENT: TokenType = TokenType("INDENT") - DEDENT: TokenType = TokenType("DEDENT") - ERROR_DEDENT: TokenType = TokenType("ERROR_DEDENT") - ASYNC: TokenType = TokenType("ASYNC") - AWAIT: TokenType = TokenType("AWAIT") - FSTRING_STRING: TokenType = TokenType("FSTRING_STRING") - FSTRING_START: TokenType = TokenType("FSTRING_START") - FSTRING_END: TokenType = TokenType("FSTRING_END") - OP: TokenType = TokenType("OP", contains_syntax=True) - ENDMARKER: TokenType = TokenType("ENDMARKER") +except ImportError: + from libcst._parser.parso.python.py_token import ( # noqa: F401 + PythonTokenTypes, + TokenType, + ) diff --git a/libcst/_parser/parso/python/tokenize.py b/libcst/_parser/parso/python/tokenize.py index 0f60472e..711a8785 100644 --- a/libcst/_parser/parso/python/tokenize.py +++ b/libcst/_parser/parso/python/tokenize.py @@ -36,10 +36,10 @@ from collections import namedtuple from dataclasses import dataclass from typing import Dict, Generator, Iterable, Optional, Pattern, Set, Tuple +from libcst import CSTLogicError from libcst._parser.parso.python.token import PythonTokenTypes from libcst._parser.parso.utils import PythonVersionInfo, split_lines - # Maximum code point of Unicode 6.0: 0x10ffff (1,114,111) MAX_UNICODE = "\U0010ffff" BOM_UTF8_STRING = BOM_UTF8.decode("utf-8") @@ -146,8 +146,18 @@ def _get_token_collection(version_info: PythonVersionInfo) -> TokenCollection: return result -fstring_string_single_line = _compile(r"(?:\{\{|\}\}|\\(?:\r\n?|\n)|[^{}\r\n])+") -fstring_string_multi_line = _compile(r"(?:[^{}]+|\{\{|\}\})+") +fstring_raw_string = _compile(r"(?:[^{}]+|\{\{|\}\})+") + +unicode_character_name = r"[A-Za-z0-9\-]+(?: [A-Za-z0-9\-]+)*" +fstring_string_single_line = _compile( + r"(?:\{\{|\}\}|\\N\{" + + 
unicode_character_name + + r"\}|\\(?:\r\n?|\n)|\\[^\r\nN]|[^{}\r\n\\])+" +) +fstring_string_multi_line = _compile( + r"(?:\{\{|\}\}|\\N\{" + unicode_character_name + r"\}|\\[^N]|[^{}\\])+" +) + fstring_format_spec_single_line = _compile(r"(?:\\(?:\r\n?|\n)|[^{}\r\n])+") fstring_format_spec_multi_line = _compile(r"[^{}]+") @@ -327,8 +337,9 @@ class PythonToken(Token): class FStringNode: - def __init__(self, quote): + def __init__(self, quote, raw): self.quote = quote + self.raw = raw self.parentheses_count = 0 self.previous_lines = "" self.last_string_start_pos = None @@ -377,7 +388,9 @@ def _find_fstring_string(endpats, fstring_stack, line, lnum, pos): else: regex = fstring_format_spec_single_line else: - if allow_multiline: + if tos.raw: + regex = fstring_raw_string + elif allow_multiline: regex = fstring_string_multi_line else: regex = fstring_string_single_line @@ -510,14 +523,14 @@ def _tokenize_lines_py36_or_below( # noqa: C901 if contstr: # continued string if endprog is None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") endmatch = endprog.match(line) if endmatch: pos = endmatch.end(0) if contstr_start is None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") if stashed is not None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") yield PythonToken(STRING, contstr + line[:pos], contstr_start, prefix) contstr = "" contline = None @@ -535,7 +548,7 @@ def _tokenize_lines_py36_or_below( # noqa: C901 ) if string: if stashed is not None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") yield PythonToken( FSTRING_STRING, string, @@ -560,7 +573,7 @@ def _tokenize_lines_py36_or_below( # noqa: C901 pos += quote_length if fstring_end_token is not None: if stashed is not None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") yield fstring_end_token continue @@ -568,6 +581,7 @@ def _tokenize_lines_py36_or_below( # noqa: C901 if not pseudomatch: # scan for 
tokens match = token_collection.whitespace.match(line, pos) if pos == 0: + # pyre-fixme[16]: `Optional` has no attribute `end`. yield from dedent_if_necessary(match.end()) pos = match.end() new_line = False @@ -575,6 +589,7 @@ def _tokenize_lines_py36_or_below( # noqa: C901 ERRORTOKEN, line[pos], (lnum, pos), + # pyre-fixme[16]: `Optional` has no attribute `group`. additional_prefix + match.group(0), ) additional_prefix = "" @@ -753,7 +768,10 @@ def _tokenize_lines_py36_or_below( # noqa: C901 token in token_collection.fstring_pattern_map ): # The start of an fstring. fstring_stack.append( - FStringNode(token_collection.fstring_pattern_map[token]) + FStringNode( + token_collection.fstring_pattern_map[token], + "r" in token or "R" in token, + ) ) if stashed is not None: yield stashed @@ -868,12 +886,12 @@ def _tokenize_lines_py37_or_above( # noqa: C901 if contstr: # continued string if endprog is None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") endmatch = endprog.match(line) if endmatch: pos = endmatch.end(0) if contstr_start is None: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") yield PythonToken(STRING, contstr + line[:pos], contstr_start, prefix) contstr = "" contline = None @@ -920,6 +938,7 @@ def _tokenize_lines_py37_or_above( # noqa: C901 if not pseudomatch: # scan for tokens match = token_collection.whitespace.match(line, pos) if pos == 0: + # pyre-fixme[16]: `Optional` has no attribute `end`. for t in dedent_if_necessary(match.end()): yield t pos = match.end() @@ -928,6 +947,7 @@ def _tokenize_lines_py37_or_above( # noqa: C901 ERRORTOKEN, line[pos], (lnum, pos), + # pyre-fixme[16]: `Optional` has no attribute `group`. 
additional_prefix + match.group(0), ) additional_prefix = "" @@ -980,7 +1000,14 @@ def _tokenize_lines_py37_or_above( # noqa: C901 indents.append(indent) break if str.isidentifier(token): - yield PythonToken(NAME, token, spos, prefix) + # py37 doesn't need special tokens for async/await, and we could + # emit NAME, but then we'd need different grammar for py36 and py37. + if token == "async": + yield PythonToken(ASYNC, token, spos, prefix) + elif token == "await": + yield PythonToken(AWAIT, token, spos, prefix) + else: + yield PythonToken(NAME, token, spos, prefix) else: for t in _split_illegal_unicode_name(token, spos, prefix): yield t # yield from Python 2 @@ -1044,7 +1071,10 @@ def _tokenize_lines_py37_or_above( # noqa: C901 token in token_collection.fstring_pattern_map ): # The start of an fstring. fstring_stack.append( - FStringNode(token_collection.fstring_pattern_map[token]) + FStringNode( + token_collection.fstring_pattern_map[token], + "r" in token or "R" in token, + ) ) yield PythonToken(FSTRING_START, token, spos, prefix) elif initial == "\\" and line[start:] in ( diff --git a/libcst/_parser/parso/tests/__init__.py b/libcst/_parser/parso/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/parso/tests/__init__.py +++ b/libcst/_parser/parso/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_parser/parso/tests/test_fstring.py b/libcst/_parser/parso/tests/test_fstring.py index 6851e8b1..255366bb 100644 --- a/libcst/_parser/parso/tests/test_fstring.py +++ b/libcst/_parser/parso/tests/test_fstring.py @@ -15,7 +15,7 @@ # pyre-unsafe from libcst._parser.parso.python.tokenize import tokenize from libcst._parser.parso.utils import parse_version_string -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class ParsoTokenizeTest(UnitTest): diff --git a/libcst/_parser/parso/tests/test_tokenize.py b/libcst/_parser/parso/tests/test_tokenize.py index f2c62d33..c8180047 100644 --- a/libcst/_parser/parso/tests/test_tokenize.py +++ b/libcst/_parser/parso/tests/test_tokenize.py @@ -20,8 +20,7 @@ from textwrap import dedent from libcst._parser.parso.python.token import PythonTokenTypes from libcst._parser.parso.python.tokenize import PythonToken, tokenize from libcst._parser.parso.utils import parse_version_string, split_lines -from libcst.testing.utils import UnitTest, data_provider - +from libcst.testing.utils import data_provider, UnitTest # To make it easier to access some of the token types, just put them here. 
NAME = PythonTokenTypes.NAME diff --git a/libcst/_parser/parso/tests/test_utils.py b/libcst/_parser/parso/tests/test_utils.py index 17bdb8a0..1f548ef4 100644 --- a/libcst/_parser/parso/tests/test_utils.py +++ b/libcst/_parser/parso/tests/test_utils.py @@ -14,7 +14,7 @@ # - Remove grammar-specific tests # pyre-unsafe from libcst._parser.parso.utils import python_bytes_to_unicode, split_lines -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class ParsoUtilsTest(UnitTest): @@ -39,8 +39,8 @@ class ParsoUtilsTest(UnitTest): # Invalid line breaks ("a\vb", ["a\vb"], False), ("a\vb", ["a\vb"], True), - ("\x1C", ["\x1C"], False), - ("\x1C", ["\x1C"], True), + ("\x1c", ["\x1c"], False), + ("\x1c", ["\x1c"], True), ) ) def test_split_lines(self, string, expected_result, keepends): diff --git a/libcst/_parser/parso/utils.py b/libcst/_parser/parso/utils.py index 27b93731..54517123 100644 --- a/libcst/_parser/parso/utils.py +++ b/libcst/_parser/parso/utils.py @@ -23,16 +23,15 @@ from ast import literal_eval from dataclasses import dataclass from typing import Optional, Sequence, Tuple, Union - # The following is a list in Python that are line breaks in str.splitlines, but # not in Python. In Python only \r (Carriage Return, 0xD) and \n (Line Feed, # 0xA) are allowed to split lines. _NON_LINE_BREAKS = ( "\v", # Vertical Tabulation 0xB "\f", # Form Feed 0xC - "\x1C", # File Separator - "\x1D", # Group Separator - "\x1E", # Record Separator + "\x1c", # File Separator + "\x1d", # Group Separator + "\x1e", # Record Separator "\x85", # Next Line (NEL - Equivalent to CR+LF. # Used to mark end-of-line on some IBM mainframes.) "\u2028", # Line Separator @@ -115,11 +114,11 @@ def python_bytes_to_unicode( return b"utf-8" # pyre-ignore Pyre can't see that Union[str, bytes] conforms to AnyStr. 
- first_two_match = re.match(br"(?:[^\n]*\n){0,2}", source) + first_two_match = re.match(rb"(?:[^\n]*\n){0,2}", source) if first_two_match is None: return encoding first_two_lines = first_two_match.group(0) - possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)", first_two_lines) + possible_encoding = re.search(rb"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) else: diff --git a/libcst/_parser/production_decorator.py b/libcst/_parser/production_decorator.py index c982bc8d..d5ba52de 100644 --- a/libcst/_parser/production_decorator.py +++ b/libcst/_parser/production_decorator.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,6 @@ from typing import Callable, Optional, Sequence, TypeVar from libcst._parser.types.conversions import NonterminalConversion from libcst._parser.types.production import Production - _NonterminalConversionT = TypeVar( "_NonterminalConversionT", bound=NonterminalConversion ) @@ -22,6 +21,10 @@ def with_production( *, version: Optional[str] = None, future: Optional[str] = None, + # pyre-fixme[34]: `Variable[_NonterminalConversionT (bound to + # typing.Callable[[libcst_native.parser_config.ParserConfig, + # typing.Sequence[typing.Any]], typing.Any])]` isn't present in the function's + # parameters. ) -> Callable[[_NonterminalConversionT], _NonterminalConversionT]: """ Attaches a bit of grammar to a conversion function. The parser extracts all of these @@ -36,7 +39,7 @@ def with_production( # pyre-ignore: Pyre doesn't think that fn has a __name__ attribute fn_name = fn.__name__ if not fn_name.startswith("convert_"): - raise Exception( + raise ValueError( "A function with a production must be named 'convert_X', not " + f"'{fn_name}'." 
) diff --git a/libcst/_parser/py_whitespace_parser.py b/libcst/_parser/py_whitespace_parser.py new file mode 100644 index 00000000..6b6573a6 --- /dev/null +++ b/libcst/_parser/py_whitespace_parser.py @@ -0,0 +1,271 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Optional, Sequence, Tuple, Union + +from libcst import CSTLogicError, ParserSyntaxError +from libcst._nodes.whitespace import ( + Comment, + COMMENT_RE, + EmptyLine, + Newline, + NEWLINE_RE, + ParenthesizedWhitespace, + SIMPLE_WHITESPACE_RE, + SimpleWhitespace, + TrailingWhitespace, +) +from libcst._parser.types.config import BaseWhitespaceParserConfig +from libcst._parser.types.whitespace_state import WhitespaceState as State + +# BEGIN PARSER ENTRYPOINTS + + +def parse_simple_whitespace( + config: BaseWhitespaceParserConfig, state: State +) -> SimpleWhitespace: + # The match never fails because the pattern can match an empty string + lines = config.lines + # pyre-fixme[16]: Optional type has no attribute `group`. + ws_line = SIMPLE_WHITESPACE_RE.match(lines[state.line - 1], state.column).group(0) + ws_line_list = [ws_line] + while "\\" in ws_line: + # continuation character + state.line += 1 + state.column = 0 + ws_line = SIMPLE_WHITESPACE_RE.match(lines[state.line - 1], state.column).group( + 0 + ) + ws_line_list.append(ws_line) + + # TODO: we could special-case the common case where there's no continuation + # character to avoid list construction and joining. 
+ + # once we've finished collecting continuation characters + state.column += len(ws_line) + return SimpleWhitespace("".join(ws_line_list)) + + +def parse_empty_lines( + config: BaseWhitespaceParserConfig, + state: State, + *, + override_absolute_indent: Optional[str] = None, +) -> Sequence[EmptyLine]: + # If override_absolute_indent is true, then we need to parse all lines up + # to and including the last line that is indented at our level. These all + # belong to the footer and not to the next line's leading_lines. All lines + # that have indent=False and come after the last line where indent=True + # do not belong to this node. + state_for_line = State( + state.line, state.column, state.absolute_indent, state.is_parenthesized + ) + lines: List[Tuple[State, EmptyLine]] = [] + while True: + el = _parse_empty_line( + config, state_for_line, override_absolute_indent=override_absolute_indent + ) + if el is None: + break + + # Store the updated state with the element we parsed. Then make a new state + # clone for the next element. + lines.append((state_for_line, el)) + state_for_line = State( + state_for_line.line, + state_for_line.column, + state.absolute_indent, + state.is_parenthesized, + ) + + if override_absolute_indent is not None: + # We need to find the last element that is indented, and then split the list + # at that point. + for i in range(len(lines) - 1, -1, -1): + if lines[i][1].indent: + lines = lines[: (i + 1)] + break + else: + # We didn't find any lines, throw them all away + lines = [] + + if lines: + # Update the state line and column to match the last line actually parsed. 
+ final_state: State = lines[-1][0] + state.line = final_state.line + state.column = final_state.column + return [r[1] for r in lines] + + +def parse_trailing_whitespace( + config: BaseWhitespaceParserConfig, state: State +) -> TrailingWhitespace: + trailing_whitespace = _parse_trailing_whitespace(config, state) + if trailing_whitespace is None: + raise ParserSyntaxError( + "Internal Error: Failed to parse TrailingWhitespace. This should never " + + "happen because a TrailingWhitespace is never optional in the grammar, " + + "so this error should've been caught by parso first.", + lines=config.lines, + raw_line=state.line, + raw_column=state.column, + ) + return trailing_whitespace + + +def parse_parenthesizable_whitespace( + config: BaseWhitespaceParserConfig, state: State +) -> Union[SimpleWhitespace, ParenthesizedWhitespace]: + if state.is_parenthesized: + # First, try parenthesized (don't need speculation because it either + # parses or doesn't modify state). + parenthesized_whitespace = _parse_parenthesized_whitespace(config, state) + if parenthesized_whitespace is not None: + return parenthesized_whitespace + # Now, just parse and return a simple whitespace + return parse_simple_whitespace(config, state) + + +# END PARSER ENTRYPOINTS +# BEGIN PARSER INTERNAL PRODUCTIONS + + +def _parse_empty_line( + config: BaseWhitespaceParserConfig, + state: State, + *, + override_absolute_indent: Optional[str] = None, +) -> Optional[EmptyLine]: + # begin speculative parsing + speculative_state = State( + state.line, state.column, state.absolute_indent, state.is_parenthesized + ) + try: + indent = _parse_indent( + config, speculative_state, override_absolute_indent=override_absolute_indent + ) + except Exception: + # We aren't on a new line, speculative parsing failed + return None + whitespace = parse_simple_whitespace(config, speculative_state) + comment = _parse_comment(config, speculative_state) + newline = _parse_newline(config, speculative_state) + if newline is None: 
+ # speculative parsing failed + return None + # speculative parsing succeeded + state.line = speculative_state.line + state.column = speculative_state.column + # don't need to copy absolute_indent/is_parenthesized because they don't change. + return EmptyLine(indent, whitespace, comment, newline) + + +def _parse_indent( + config: BaseWhitespaceParserConfig, + state: State, + *, + override_absolute_indent: Optional[str] = None, +) -> bool: + """ + Returns True if indentation was found, otherwise False. + """ + absolute_indent = ( + override_absolute_indent + if override_absolute_indent is not None + else state.absolute_indent + ) + line_str = config.lines[state.line - 1] + if state.column != 0: + if state.column == len(line_str) and state.line == len(config.lines): + # We're at EOF, treat this as a failed speculative parse + return False + raise CSTLogicError( + "Internal Error: Column should be 0 when parsing an indent." + ) + if line_str.startswith(absolute_indent, state.column): + state.column += len(absolute_indent) + return True + return False + + +def _parse_comment( + config: BaseWhitespaceParserConfig, state: State +) -> Optional[Comment]: + comment_match = COMMENT_RE.match(config.lines[state.line - 1], state.column) + if comment_match is None: + return None + comment = comment_match.group(0) + state.column += len(comment) + return Comment(comment) + + +def _parse_newline( + config: BaseWhitespaceParserConfig, state: State +) -> Optional[Newline]: + # begin speculative parsing + line_str = config.lines[state.line - 1] + newline_match = NEWLINE_RE.match(line_str, state.column) + if newline_match is not None: + # speculative parsing succeeded + newline_str = newline_match.group(0) + state.column += len(newline_str) + if state.column != len(line_str): + raise ParserSyntaxError( + "Internal Error: Found a newline, but it wasn't the EOL.", + lines=config.lines, + raw_line=state.line, + raw_column=state.column, + ) + if state.line < len(config.lines): + # this 
newline was the end of a line, and there's another line, + # therefore we should move to the next line + state.line += 1 + state.column = 0 + if newline_str == config.default_newline: + # Just inherit it from the Module instead of explicitly setting it. + return Newline() + else: + return Newline(newline_str) + else: # no newline was found, speculative parsing failed + return None + + +def _parse_trailing_whitespace( + config: BaseWhitespaceParserConfig, state: State +) -> Optional[TrailingWhitespace]: + # Begin speculative parsing + speculative_state = State( + state.line, state.column, state.absolute_indent, state.is_parenthesized + ) + whitespace = parse_simple_whitespace(config, speculative_state) + comment = _parse_comment(config, speculative_state) + newline = _parse_newline(config, speculative_state) + if newline is None: + # Speculative parsing failed + return None + # Speculative parsing succeeded + state.line = speculative_state.line + state.column = speculative_state.column + # don't need to copy absolute_indent/is_parenthesized because they don't change. 
+ return TrailingWhitespace(whitespace, comment, newline) + + +def _parse_parenthesized_whitespace( + config: BaseWhitespaceParserConfig, state: State +) -> Optional[ParenthesizedWhitespace]: + first_line = _parse_trailing_whitespace(config, state) + if first_line is None: + # Speculative parsing failed + return None + empty_lines = () + while True: + empty_line = _parse_empty_line(config, state) + if empty_line is None: + # This isn't an empty line, so parse it below + break + empty_lines = empty_lines + (empty_line,) + indent = _parse_indent(config, state) + last_line = parse_simple_whitespace(config, state) + return ParenthesizedWhitespace(first_line, empty_lines, indent, last_line) diff --git a/libcst/_parser/python_parser.py b/libcst/_parser/python_parser.py index 05ea0b57..7f3d53db 100644 --- a/libcst/_parser/python_parser.py +++ b/libcst/_parser/python_parser.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/tests/__init__.py b/libcst/_parser/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/tests/__init__.py +++ b/libcst/_parser/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/tests/test_config.py b/libcst/_parser/tests/test_config.py index 78692369..d9c31db5 100644 --- a/libcst/_parser/tests/test_config.py +++ b/libcst/_parser/tests/test_config.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/tests/test_detect_config.py b/libcst/_parser/tests/test_detect_config.py index b17c9fe5..6d9eaa6c 100644 --- a/libcst/_parser/tests/test_detect_config.py +++ b/libcst/_parser/tests/test_detect_config.py @@ -1,15 +1,18 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -import dataclasses from typing import Union from libcst._parser.detect_config import detect_config from libcst._parser.parso.utils import PythonVersionInfo -from libcst._parser.types.config import ParserConfig, PartialParserConfig -from libcst.testing.utils import UnitTest, data_provider +from libcst._parser.types.config import ( + parser_config_asdict, + ParserConfig, + PartialParserConfig, +) +from libcst.testing.utils import data_provider, UnitTest class TestDetectConfig(UnitTest): @@ -316,7 +319,7 @@ class TestDetectConfig(UnitTest): expected_config: ParserConfig, ) -> None: self.assertEqual( - dataclasses.asdict( + parser_config_asdict( detect_config( source, partial=partial, @@ -324,5 +327,5 @@ class TestDetectConfig(UnitTest): detect_default_newline=detect_default_newline, ).config ), - dataclasses.asdict(expected_config), + parser_config_asdict(expected_config), ) diff --git a/libcst/_parser/tests/test_footer_behavior.py b/libcst/_parser/tests/test_footer_behavior.py index 23ff4e25..ccac8254 100644 --- a/libcst/_parser/tests/test_footer_behavior.py +++ b/libcst/_parser/tests/test_footer_behavior.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,14 +8,17 @@ from textwrap import dedent import libcst as cst from libcst import parse_module from libcst._nodes.deep_equals import deep_equals -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class FooterBehaviorTest(UnitTest): @data_provider( { # Literally the most basic example - "simple_module": {"code": "\n", "expected_module": cst.Module(body=())}, + "simple_module": { + "code": "", + "expected_module": cst.Module(body=(), has_trailing_newline=False), + }, # A module with a header comment "header_only_module": { "code": "# This is a header comment\n", diff --git a/libcst/_parser/tests/test_node_identity.py b/libcst/_parser/tests/test_node_identity.py index dce157ef..91171915 100644 --- a/libcst/_parser/tests/test_node_identity.py +++ b/libcst/_parser/tests/test_node_identity.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,7 @@ from collections import Counter from textwrap import dedent import libcst as cst -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class DuplicateLeafNodeTest(UnitTest): diff --git a/libcst/_parser/tests/test_parse_errors.py b/libcst/_parser/tests/test_parse_errors.py index 6d651f3b..7697893d 100644 --- a/libcst/_parser/tests/test_parse_errors.py +++ b/libcst/_parser/tests/test_parse_errors.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,9 +6,11 @@ from textwrap import dedent from typing import Callable +from unittest.mock import patch import libcst as cst -from libcst.testing.utils import UnitTest, data_provider +from libcst._nodes.base import CSTValidationError +from libcst.testing.utils import data_provider, UnitTest class ParseErrorsTest(UnitTest): @@ -169,4 +171,11 @@ class ParseErrorsTest(UnitTest): ) -> None: with self.assertRaises(cst.ParserSyntaxError) as cm: parse_fn() - self.assertEqual(str(cm.exception), expected) + # make sure str() doesn't blow up + self.assertIn("Syntax Error", str(cm.exception)) + + def test_native_fallible_into_py(self) -> None: + with patch("libcst._nodes.expression.Name._validate") as await_validate: + await_validate.side_effect = CSTValidationError("validate is broken") + with self.assertRaises((SyntaxError, cst.ParserSyntaxError)): + cst.parse_module("foo") diff --git a/libcst/_parser/tests/test_version_compare.py b/libcst/_parser/tests/test_version_compare.py index efef5a25..102657d6 100644 --- a/libcst/_parser/tests/test_version_compare.py +++ b/libcst/_parser/tests/test_version_compare.py @@ -1,11 +1,11 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from libcst._parser.grammar import _should_include from libcst._parser.parso.utils import PythonVersionInfo -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class VersionCompareTest(UnitTest): diff --git a/libcst/_parser/tests/test_whitespace_parser.py b/libcst/_parser/tests/test_whitespace_parser.py index dcbafa7e..bbe8886a 100644 --- a/libcst/_parser/tests/test_whitespace_parser.py +++ b/libcst/_parser/tests/test_whitespace_parser.py @@ -1,32 +1,24 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from dataclasses import dataclass -from typing import Callable, Sequence, TypeVar +from typing import Callable, TypeVar import libcst as cst from libcst._nodes.deep_equals import deep_equals -from libcst._parser.types.config import BaseWhitespaceParserConfig +from libcst._parser.types.config import MockWhitespaceParserConfig as Config from libcst._parser.types.whitespace_state import WhitespaceState as State from libcst._parser.whitespace_parser import ( parse_empty_lines, parse_simple_whitespace, parse_trailing_whitespace, ) -from libcst.testing.utils import UnitTest, data_provider - +from libcst.testing.utils import data_provider, UnitTest _T = TypeVar("_T") -@dataclass(frozen=True) -class Config(BaseWhitespaceParserConfig): - lines: Sequence[str] - default_newline: str - - class WhitespaceParserTest(UnitTest): @data_provider( { diff --git a/libcst/_parser/tests/test_wrapped_tokenize.py b/libcst/_parser/tests/test_wrapped_tokenize.py index 56bf3dbd..dbaf3700 100644 --- a/libcst/_parser/tests/test_wrapped_tokenize.py +++ b/libcst/_parser/tests/test_wrapped_tokenize.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,11 +8,10 @@ from typing import Sequence from libcst._exceptions import ParserSyntaxError from libcst._parser.parso.python.token import PythonTokenTypes -from libcst._parser.parso.utils import PythonVersionInfo, parse_version_string +from libcst._parser.parso.utils import parse_version_string, PythonVersionInfo from libcst._parser.types.whitespace_state import WhitespaceState from libcst._parser.wrapped_tokenize import Token, tokenize -from libcst.testing.utils import UnitTest, data_provider - +from libcst.testing.utils import data_provider, UnitTest _PY38 = parse_version_string("3.8.0") _PY37 = parse_version_string("3.7.0") @@ -1215,7 +1214,7 @@ class WrappedTokenizeTest(UnitTest): _PY37, ( Token( - type=PythonTokenTypes.NAME, + type=PythonTokenTypes.ASYNC, string="async", start_pos=(1, 0), end_pos=(1, 5), @@ -1365,7 +1364,7 @@ class WrappedTokenizeTest(UnitTest): relative_indent=None, ), Token( - type=PythonTokenTypes.NAME, + type=PythonTokenTypes.AWAIT, string="await", start_pos=(2, 11), end_pos=(2, 16), @@ -1650,7 +1649,7 @@ class WrappedTokenizeTest(UnitTest): _PY38, ( Token( - type=PythonTokenTypes.NAME, + type=PythonTokenTypes.ASYNC, string="async", start_pos=(1, 0), end_pos=(1, 5), @@ -1800,7 +1799,7 @@ class WrappedTokenizeTest(UnitTest): relative_indent=None, ), Token( - type=PythonTokenTypes.NAME, + type=PythonTokenTypes.AWAIT, string="await", start_pos=(2, 11), end_pos=(2, 16), diff --git a/libcst/_parser/types/__init__.py b/libcst/_parser/types/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/types/__init__.py +++ b/libcst/_parser/types/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/_parser/types/config.py b/libcst/_parser/types/config.py index 7c76e4c7..289fd8ae 100644 --- a/libcst/_parser/types/config.py +++ b/libcst/_parser/types/config.py @@ -1,51 +1,35 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. - -import abc import codecs import re import sys from dataclasses import dataclass, field, fields from enum import Enum -from typing import FrozenSet, List, Optional, Pattern, Sequence, Union +from typing import Any, Callable, FrozenSet, List, Mapping, Optional, Pattern, Union from libcst._add_slots import add_slots from libcst._nodes.whitespace import NEWLINE_RE -from libcst._parser.parso.utils import PythonVersionInfo, parse_version_string - +from libcst._parser.parso.utils import parse_version_string, PythonVersionInfo _INDENT_RE: Pattern[str] = re.compile(r"[ \t]+") +try: + from libcst_native import parser_config as config_mod -class BaseWhitespaceParserConfig(abc.ABC): - """ - Represents the subset of ParserConfig that the whitespace parser requires. This - makes calling the whitespace parser in tests with a mocked configuration easier. - """ + MockWhitespaceParserConfig = config_mod.BaseWhitespaceParserConfig +except ImportError: + from libcst._parser.types import py_config as config_mod - lines: Sequence[str] - default_newline: str + MockWhitespaceParserConfig = config_mod.MockWhitespaceParserConfig - -@add_slots # We'll access these properties frequently, so use slots -@dataclass(frozen=True) -class ParserConfig(BaseWhitespaceParserConfig): - """ - An internal configuration object that the python parser passes around. These values - are global to the parsed code and should not change during the lifetime of the - parser object. 
- """ - - lines: Sequence[str] - encoding: str - default_indent: str - default_newline: str - has_trailing_newline: bool - version: PythonVersionInfo - future_imports: FrozenSet[str] +BaseWhitespaceParserConfig = config_mod.BaseWhitespaceParserConfig +ParserConfig = config_mod.ParserConfig +parser_config_asdict: Callable[[ParserConfig], Mapping[str, Any]] = ( + config_mod.parser_config_asdict +) class AutoConfig(Enum): diff --git a/libcst/_parser/types/conversions.py b/libcst/_parser/types/conversions.py index d0193624..4c589c52 100644 --- a/libcst/_parser/types/conversions.py +++ b/libcst/_parser/types/conversions.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,6 @@ from typing import Any, Callable, Sequence from libcst._parser.types.config import ParserConfig from libcst._parser.types.token import Token - # pyre-fixme[33]: Aliased annotation cannot contain `Any`. NonterminalConversion = Callable[[ParserConfig, Sequence[Any]], Any] # pyre-fixme[33]: Aliased annotation cannot contain `Any`. diff --git a/libcst/_parser/types/partials.py b/libcst/_parser/types/partials.py index a53f3778..4db89fab 100644 --- a/libcst/_parser/types/partials.py +++ b/libcst/_parser/types/partials.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -29,7 +29,6 @@ from libcst._nodes.statement import AsName, BaseSmallStatement, Decorator, Impor from libcst._nodes.whitespace import EmptyLine, SimpleWhitespace, TrailingWhitespace from libcst._parser.types.whitespace_state import WhitespaceState - _T = TypeVar("_T") diff --git a/libcst/_parser/types/production.py b/libcst/_parser/types/production.py index bb60014a..dfeffe7b 100644 --- a/libcst/_parser/types/production.py +++ b/libcst/_parser/types/production.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/types/py_config.py b/libcst/_parser/types/py_config.py new file mode 100644 index 00000000..d7732591 --- /dev/null +++ b/libcst/_parser/types/py_config.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import abc +from dataclasses import asdict, dataclass +from typing import Any, FrozenSet, Mapping, Sequence + +from libcst._parser.parso.utils import PythonVersionInfo + + +class BaseWhitespaceParserConfig(abc.ABC): + """ + Represents the subset of ParserConfig that the whitespace parser requires. This + makes calling the whitespace parser in tests with a mocked configuration easier. + """ + + lines: Sequence[str] + default_newline: str + + +@dataclass(frozen=True) +class MockWhitespaceParserConfig(BaseWhitespaceParserConfig): + """ + An internal type used by unit tests. + """ + + lines: Sequence[str] + default_newline: str + + +@dataclass(frozen=True) +class ParserConfig(BaseWhitespaceParserConfig): + """ + An internal configuration object that the python parser passes around. 
These + values are global to the parsed code and should not change during the lifetime + of the parser object. + """ + + lines: Sequence[str] + encoding: str + default_indent: str + default_newline: str + has_trailing_newline: bool + version: PythonVersionInfo + future_imports: FrozenSet[str] + + +def parser_config_asdict(config: ParserConfig) -> Mapping[str, Any]: + """ + An internal helper function used by unit tests to compare configs. + """ + return asdict(config) diff --git a/libcst/_parser/types/py_token.py b/libcst/_parser/types/py_token.py new file mode 100644 index 00000000..d2f9b537 --- /dev/null +++ b/libcst/_parser/types/py_token.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + + +from dataclasses import dataclass +from typing import Optional, Tuple + +from libcst._add_slots import add_slots +from libcst._parser.parso.python.token import TokenType +from libcst._parser.types.whitespace_state import WhitespaceState + + +@add_slots +@dataclass(frozen=True) +class Token: + type: TokenType + string: str + # The start of where `string` is in the source, not including leading whitespace. + start_pos: Tuple[int, int] + # The end of where `string` is in the source, not including trailing whitespace. + end_pos: Tuple[int, int] + whitespace_before: WhitespaceState + whitespace_after: WhitespaceState + # The relative indent this token adds. + relative_indent: Optional[str] diff --git a/libcst/_parser/types/py_whitespace_state.py b/libcst/_parser/types/py_whitespace_state.py new file mode 100644 index 00000000..6359e83e --- /dev/null +++ b/libcst/_parser/types/py_whitespace_state.py @@ -0,0 +1,36 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from dataclasses import dataclass + +from libcst._add_slots import add_slots + + +@add_slots +@dataclass(frozen=False) +class WhitespaceState: + """ + A frequently mutated store of the whitespace parser's current state. This object + must be cloned prior to speculative parsing. + + This is in contrast to the `config` object each whitespace parser function takes, + which is frozen and never mutated. + + Whitespace parsing works by mutating this state object. By encapsulating saving, and + re-using state objects inside the top-level python parser, the whitespace parser is + able to be reentrant. One 'convert' function can consume part of the whitespace, and + another 'convert' function can consume the rest, depending on who owns what + whitespace. + + This is similar to the approach you might take to parse nested languages (e.g. + JavaScript inside of HTML). We're treating whitespace as a separate language and + grammar from the rest of Python's grammar. + """ + + line: int # one-indexed (to match parso's behavior) + column: int # zero-indexed (to match parso's behavior) + # What to look for when executing `_parse_indent`. + absolute_indent: str + is_parenthesized: bool diff --git a/libcst/_parser/types/tests/__init__.py b/libcst/_parser/types/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/_parser/types/tests/__init__.py +++ b/libcst/_parser/types/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_parser/types/tests/test_config.py b/libcst/_parser/types/tests/test_config.py index 8b68bd18..12a6ad27 100644 --- a/libcst/_parser/types/tests/test_config.py +++ b/libcst/_parser/types/tests/test_config.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. 
and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,7 @@ from typing import Callable from libcst._parser.types.config import PartialParserConfig -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class TestConfig(UnitTest): diff --git a/libcst/_parser/types/token.py b/libcst/_parser/types/token.py index 60ddb2a2..54d904ef 100644 --- a/libcst/_parser/types/token.py +++ b/libcst/_parser/types/token.py @@ -1,27 +1,12 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from dataclasses import dataclass -from typing import Optional, Tuple +try: + from libcst_native import tokenize -from libcst._add_slots import add_slots -from libcst._parser.parso.python.token import TokenType -from libcst._parser.types.whitespace_state import WhitespaceState - - -@add_slots -@dataclass(frozen=True) -class Token: - type: TokenType - string: str - # The start of where `string` is in the source, not including leading whitespace. - start_pos: Tuple[int, int] - # The end of where `string` is in the source, not including trailing whitespace. - end_pos: Tuple[int, int] - whitespace_before: WhitespaceState - whitespace_after: WhitespaceState - # The relative indent this token adds. - relative_indent: Optional[str] + Token = tokenize.Token +except ImportError: + from libcst._parser.types.py_token import Token # noqa: F401 diff --git a/libcst/_parser/types/whitespace_state.py b/libcst/_parser/types/whitespace_state.py index b5554a2b..7eaeab32 100644 --- a/libcst/_parser/types/whitespace_state.py +++ b/libcst/_parser/types/whitespace_state.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. 
and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,34 +7,9 @@ Defines the state object used by the whitespace parser. """ -from dataclasses import dataclass +try: + from libcst_native import whitespace_state as mod +except ImportError: + from libcst._parser.types import py_whitespace_state as mod -from libcst._add_slots import add_slots - - -@add_slots -@dataclass(frozen=False) -class WhitespaceState: - """ - A frequently mutated store of the whitespace parser's current state. This object - must be cloned prior to speculative parsing. - - This is in contrast to the `config` object each whitespace parser function takes, - which is frozen and never mutated. - - Whitespace parsing works by mutating this state object. By encapsulating saving, and - re-using state objects inside the top-level python parser, the whitespace parser is - able to be reentrant. One 'convert' function can consume part of the whitespace, and - another 'convert' function can consume the rest, depending on who owns what - whitespace. - - This is similar to the approach you might take to parse nested languages (e.g. - JavaScript inside of HTML). We're treating whitespace as a separate language and - grammar from the rest of Python's grammar. - """ - - line: int # one-indexed (to match parso's behavior) - column: int # zero-indexed (to match parso's behavior) - # What to look for when executing `_parse_indent`. - absolute_indent: str - is_parenthesized: bool +WhitespaceState = mod.WhitespaceState diff --git a/libcst/_parser/whitespace_parser.py b/libcst/_parser/whitespace_parser.py index b9df6c7e..1c29efc5 100644 --- a/libcst/_parser/whitespace_parser.py +++ b/libcst/_parser/whitespace_parser.py @@ -1,11 +1,11 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Parso doesn't attempt to parse (or even emit tokens for) whitespace or comments that -isn't syntatically important. Instead, we're just given the whitespace as a "prefix" of +aren't syntatically important. Instead, we're just given the whitespace as a "prefix" of the token. However, in our CST, whitespace is gathered into far more detailed objects than a simple @@ -15,259 +15,15 @@ Fortunately this isn't hard for us to parse ourselves, so we just use our own hand-rolled recursive descent parser. """ -from typing import List, Optional, Sequence, Tuple, Union +try: + # It'd be better to do `from libcst_native.whitespace_parser import *`, but we're + # blocked on https://github.com/PyO3/pyo3/issues/759 + # (which ultimately seems to be a limitation of how importlib works) + from libcst_native import whitespace_parser as mod +except ImportError: + from libcst._parser import py_whitespace_parser as mod -from libcst._nodes.whitespace import ( - COMMENT_RE, - NEWLINE_RE, - SIMPLE_WHITESPACE_RE, - Comment, - EmptyLine, - Newline, - ParenthesizedWhitespace, - SimpleWhitespace, - TrailingWhitespace, -) -from libcst._parser.types.config import BaseWhitespaceParserConfig -from libcst._parser.types.whitespace_state import WhitespaceState as State - - -# BEGIN PARSER ENTRYPOINTS - - -def parse_simple_whitespace( - config: BaseWhitespaceParserConfig, state: State -) -> SimpleWhitespace: - # The match never fails because the pattern can match an empty string - lines = config.lines - # pyre-fixme[16]: Optional type has no attribute `group`. 
- ws_line = SIMPLE_WHITESPACE_RE.match(lines[state.line - 1], state.column).group(0) - ws_line_list = [ws_line] - while "\\" in ws_line: - # continuation character - state.line += 1 - state.column = 0 - ws_line = SIMPLE_WHITESPACE_RE.match(lines[state.line - 1], state.column).group( - 0 - ) - ws_line_list.append(ws_line) - - # TODO: we could special-case the common case where there's no continuation - # character to avoid list construction and joining. - - # once we've finished collecting continuation characters - state.column += len(ws_line) - return SimpleWhitespace("".join(ws_line_list)) - - -def parse_empty_lines( - config: BaseWhitespaceParserConfig, - state: State, - *, - override_absolute_indent: Optional[str] = None, -) -> Sequence[EmptyLine]: - # If override_absolute_indent is true, then we need to parse all lines up - # to and including the last line that is indented at our level. These all - # belong to the footer and not to the next line's leading_lines. All lines - # that have indent=False and come after the last line where indent=True - # do not belong to this node. - state_for_line = State( - state.line, state.column, state.absolute_indent, state.is_parenthesized - ) - lines: List[Tuple[State, EmptyLine]] = [] - while True: - el = _parse_empty_line( - config, state_for_line, override_absolute_indent=override_absolute_indent - ) - if el is None: - break - - # Store the updated state with the element we parsed. Then make a new state - # clone for the next element. - lines.append((state_for_line, el)) - state_for_line = State( - state_for_line.line, - state_for_line.column, - state.absolute_indent, - state.is_parenthesized, - ) - - if override_absolute_indent is not None: - # We need to find the last element that is indented, and then split the list - # at that point. 
- for i in range(len(lines) - 1, -1, -1): - if lines[i][1].indent: - lines = lines[: (i + 1)] - break - else: - # We didn't find any lines, throw them all away - lines = [] - - if lines: - # Update the state line and column to match the last line actually parsed. - final_state: State = lines[-1][0] - state.line = final_state.line - state.column = final_state.column - return [r[1] for r in lines] - - -def parse_trailing_whitespace( - config: BaseWhitespaceParserConfig, state: State -) -> TrailingWhitespace: - trailing_whitespace = _parse_trailing_whitespace(config, state) - if trailing_whitespace is None: - raise Exception( - "Internal Error: Failed to parse TrailingWhitespace. This should never " - + "happen because a TrailingWhitespace is never optional in the grammar, " - + "so this error should've been caught by parso first." - ) - return trailing_whitespace - - -def parse_parenthesizable_whitespace( - config: BaseWhitespaceParserConfig, state: State -) -> Union[SimpleWhitespace, ParenthesizedWhitespace]: - if state.is_parenthesized: - # First, try parenthesized (don't need speculation because it either - # parses or doesn't modify state). 
- parenthesized_whitespace = _parse_parenthesized_whitespace(config, state) - if parenthesized_whitespace is not None: - return parenthesized_whitespace - # Now, just parse and return a simple whitespace - return parse_simple_whitespace(config, state) - - -# END PARSER ENTRYPOINTS -# BEGIN PARSER INTERNAL PRODUCTIONS - - -def _parse_empty_line( - config: BaseWhitespaceParserConfig, - state: State, - *, - override_absolute_indent: Optional[str] = None, -) -> Optional[EmptyLine]: - # begin speculative parsing - speculative_state = State( - state.line, state.column, state.absolute_indent, state.is_parenthesized - ) - try: - indent = _parse_indent( - config, speculative_state, override_absolute_indent=override_absolute_indent - ) - except Exception: - # We aren't on a new line, speculative parsing failed - return None - whitespace = parse_simple_whitespace(config, speculative_state) - comment = _parse_comment(config, speculative_state) - newline = _parse_newline(config, speculative_state) - if newline is None: - # speculative parsing failed - return None - # speculative parsing succeeded - state.line = speculative_state.line - state.column = speculative_state.column - # don't need to copy absolute_indent/is_parenthesized because they don't change. - return EmptyLine(indent, whitespace, comment, newline) - - -def _parse_indent( - config: BaseWhitespaceParserConfig, - state: State, - *, - override_absolute_indent: Optional[str] = None, -) -> bool: - """ - Returns True if indentation was found, otherwise False. 
- """ - absolute_indent = ( - override_absolute_indent - if override_absolute_indent is not None - else state.absolute_indent - ) - line_str = config.lines[state.line - 1] - if state.column != 0: - if state.column == len(line_str) and state.line == len(config.lines): - # We're at EOF, treat this as a failed speculative parse - return False - raise Exception("Internal Error: Column should be 0 when parsing an indent.") - if line_str.startswith(absolute_indent, state.column): - state.column += len(absolute_indent) - return True - return False - - -def _parse_comment( - config: BaseWhitespaceParserConfig, state: State -) -> Optional[Comment]: - comment_match = COMMENT_RE.match(config.lines[state.line - 1], state.column) - if comment_match is None: - return None - comment = comment_match.group(0) - state.column += len(comment) - return Comment(comment) - - -def _parse_newline( - config: BaseWhitespaceParserConfig, state: State -) -> Optional[Newline]: - # begin speculative parsing - line_str = config.lines[state.line - 1] - newline_match = NEWLINE_RE.match(line_str, state.column) - if newline_match is not None: - # speculative parsing succeeded - newline_str = newline_match.group(0) - state.column += len(newline_str) - if state.column != len(line_str): - raise Exception("Internal Error: Found a newline, but it wasn't the EOL.") - if state.line < len(config.lines): - # this newline was the end of a line, and there's another line, - # therefore we should move to the next line - state.line += 1 - state.column = 0 - if newline_str == config.default_newline: - # Just inherit it from the Module instead of explicitly setting it. 
- return Newline() - else: - return Newline(newline_str) - else: # no newline was found, speculative parsing failed - return None - - -def _parse_trailing_whitespace( - config: BaseWhitespaceParserConfig, state: State -) -> Optional[TrailingWhitespace]: - # Begin speculative parsing - speculative_state = State( - state.line, state.column, state.absolute_indent, state.is_parenthesized - ) - whitespace = parse_simple_whitespace(config, speculative_state) - comment = _parse_comment(config, speculative_state) - newline = _parse_newline(config, speculative_state) - if newline is None: - # Speculative parsing failed - return None - # Speculative parsing succeeded - state.line = speculative_state.line - state.column = speculative_state.column - # don't need to copy absolute_indent/is_parenthesized because they don't change. - return TrailingWhitespace(whitespace, comment, newline) - - -def _parse_parenthesized_whitespace( - config: BaseWhitespaceParserConfig, state: State -) -> Optional[ParenthesizedWhitespace]: - first_line = _parse_trailing_whitespace(config, state) - if first_line is None: - # Speculative parsing failed - return None - empty_lines = () - while True: - empty_line = _parse_empty_line(config, state) - if empty_line is None: - # This isn't an empty line, so parse it below - break - empty_lines = empty_lines + (empty_line,) - indent = _parse_indent(config, state) - last_line = parse_simple_whitespace(config, state) - return ParenthesizedWhitespace(first_line, empty_lines, indent, last_line) +parse_simple_whitespace = mod.parse_simple_whitespace +parse_empty_lines = mod.parse_empty_lines +parse_trailing_whitespace = mod.parse_trailing_whitespace +parse_parenthesizable_whitespace = mod.parse_parenthesizable_whitespace diff --git a/libcst/_parser/wrapped_tokenize.py b/libcst/_parser/wrapped_tokenize.py index 6104757d..8d601052 100644 --- a/libcst/_parser/wrapped_tokenize.py +++ b/libcst/_parser/wrapped_tokenize.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, 
Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -22,7 +22,7 @@ tokenize module, instead of as a wrapper. from dataclasses import dataclass, field from enum import Enum -from typing import Generator, List, Optional, Sequence +from typing import Generator, Iterator, List, Optional, Sequence from libcst._add_slots import add_slots from libcst._exceptions import ParserSyntaxError @@ -35,7 +35,6 @@ from libcst._parser.parso.utils import PythonVersionInfo, split_lines from libcst._parser.types.token import Token from libcst._parser.types.whitespace_state import WhitespaceState - _ERRORTOKEN: TokenType = PythonTokenTypes.ERRORTOKEN _ERROR_DEDENT: TokenType = PythonTokenTypes.ERROR_DEDENT @@ -77,15 +76,30 @@ class _TokenizeState: ) -def tokenize( - code: str, version_info: PythonVersionInfo -) -> Generator[Token, None, None]: - lines = split_lines(code, keepends=True) - return tokenize_lines(lines, version_info) +def tokenize(code: str, version_info: PythonVersionInfo) -> Iterator[Token]: + try: + from libcst_native import tokenize as native_tokenize + + return native_tokenize.tokenize(code) + except ImportError: + lines = split_lines(code, keepends=True) + return tokenize_lines(code, lines, version_info) def tokenize_lines( - lines: Sequence[str], version_info: PythonVersionInfo + code: str, lines: Sequence[str], version_info: PythonVersionInfo +) -> Iterator[Token]: + try: + from libcst_native import tokenize as native_tokenize + + # TODO: pass through version_info + return native_tokenize.tokenize(code) + except ImportError: + return tokenize_lines_py(code, lines, version_info) + + +def tokenize_lines_py( + code: str, lines: Sequence[str], version_info: PythonVersionInfo ) -> Generator[Token, None, None]: state = _TokenizeState(lines) orig_tokens_iter = iter(orig_tokenize_lines(lines, version_info)) diff --git 
a/libcst/_position.py b/libcst/_position.py index 82411402..e81e9ab4 100644 --- a/libcst/_position.py +++ b/libcst/_position.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -13,11 +13,10 @@ locations. """ from dataclasses import dataclass -from typing import Tuple, Union, cast, overload +from typing import cast, overload, Tuple, Union from libcst._add_slots import add_slots - _CodePositionT = Union[Tuple[int, int], "CodePosition"] @@ -32,6 +31,8 @@ class CodePosition: @add_slots @dataclass(frozen=True) +# pyre-fixme[13]: Attribute `end` is never initialized. +# pyre-fixme[13]: Attribute `start` is never initialized. class CodeRange: #: Starting position of a node (inclusive). start: CodePosition @@ -39,15 +40,11 @@ class CodeRange: end: CodePosition @overload - def __init__(self, start: CodePosition, end: CodePosition) -> None: - ... + def __init__(self, start: CodePosition, end: CodePosition) -> None: ... @overload - def __init__(self, start: Tuple[int, int], end: Tuple[int, int]) -> None: - ... + def __init__(self, start: Tuple[int, int], end: Tuple[int, int]) -> None: ... - # pyre-ignore[13]: Attribute `end` is never initialized. - # pyre-ignore[13]: Attribute `start` is never initialized. def __init__(self, start: _CodePositionT, end: _CodePositionT) -> None: if isinstance(start, tuple) and isinstance(end, tuple): object.__setattr__(self, "start", CodePosition(start[0], start[1])) diff --git a/libcst/_removal_sentinel.py b/libcst/_removal_sentinel.py index b8ba8498..f88f4126 100644 --- a/libcst/_removal_sentinel.py +++ b/libcst/_removal_sentinel.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,7 @@ Used by visitors. This is hoisted into a separate module to avoid some circular dependencies in the definition of CSTNode. """ -from enum import Enum, auto +from enum import auto, Enum class RemovalSentinel(Enum): diff --git a/libcst/_tabs.py b/libcst/_tabs.py index 44e245ba..0a98bc55 100644 --- a/libcst/_tabs.py +++ b/libcst/_tabs.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/_type_enforce.py b/libcst/_type_enforce.py index 12cb423f..dded4525 100644 --- a/libcst/_type_enforce.py +++ b/libcst/_type_enforce.py @@ -1,22 +1,31 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Any, Iterable, Mapping, MutableMapping, MutableSequence, Tuple - -from typing_extensions import Literal -from typing_inspect import get_args, get_origin, is_classvar, is_typevar, is_union_type - - -try: # py37+ - from typing import ForwardRef -except ImportError: # py36 - from typing import _ForwardRef as ForwardRef +from typing import ( + Any, + ClassVar, + ForwardRef, + get_args, + get_origin, + Iterable, + Literal, + Mapping, + MutableMapping, + MutableSequence, + Tuple, + TypeVar, + Union, +) def is_value_of_type( # noqa: C901 "too complex" - value: Any, expected_type: Any, invariant_check: bool = False + # pyre-fixme[2]: Parameter annotation cannot be `Any`. + value: Any, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. 
+ expected_type: Any, + invariant_check: bool = False, ) -> bool: """ This method attempts to verify a given value is of a given type. If the type is @@ -42,15 +51,11 @@ def is_value_of_type( # noqa: C901 "too complex" - Forward Refs -- use `typing.get_type_hints` to resolve these - Type[...] """ - if is_classvar(expected_type): - # `ClassVar` (no subscript) is implicitly `ClassVar[Any]` - if hasattr(expected_type, "__type__"): # py36 - expected_type = expected_type.__type__ or Any - else: # py37+ - classvar_args = get_args(expected_type) - expected_type = (classvar_args[0] or Any) if classvar_args else Any + if expected_type is ClassVar or get_origin(expected_type) is ClassVar: + classvar_args = get_args(expected_type) + expected_type = (classvar_args[0] or Any) if classvar_args else Any - if is_typevar(expected_type): + if type(expected_type) is TypeVar: # treat this the same as Any # TODO: evaluate bounds return True @@ -60,16 +65,13 @@ def is_value_of_type( # noqa: C901 "too complex" if expected_origin_type == Any: return True - elif is_union_type(expected_type): + elif expected_type is Union or get_origin(expected_type) is Union: return any( is_value_of_type(value, subtype) for subtype in expected_type.__args__ ) elif isinstance(expected_origin_type, type(Literal)): - if hasattr(expected_type, "__values__"): # py36 - literal_values = expected_type.__values__ - else: # py37+ - literal_values = get_args(expected_type, evaluate=True) + literal_values = get_args(expected_type) return any(value == literal for literal in literal_values) elif isinstance(expected_origin_type, ForwardRef): @@ -80,17 +82,14 @@ def is_value_of_type( # noqa: C901 "too complex" # We don't want to include Tuple subclasses, like NamedTuple, because they're # unlikely to behave similarly. 
elif expected_origin_type in [Tuple, tuple]: # py36 uses Tuple, py37+ uses tuple - if not isinstance(value, Tuple): + if not isinstance(value, tuple): return False - type_args = get_args(expected_type, evaluate=True) + type_args = get_args(expected_type) if len(type_args) == 0: # `Tuple` (no subscript) is implicitly `Tuple[Any, ...]` return True - if type_args is None: - return True - if len(value) != len(type_args): return False # TODO: Handle `Tuple[T, ...]` like `Iterable[T]` @@ -107,7 +106,7 @@ def is_value_of_type( # noqa: C901 "too complex" if not issubclass(type(value), expected_origin_type): return False - type_args = get_args(expected_type, evaluate=True) + type_args = get_args(expected_type) if len(type_args) == 0: # `Mapping` (no subscript) is implicitly `Mapping[Any, Any]`. return True @@ -134,7 +133,8 @@ def is_value_of_type( # noqa: C901 "too complex" # Similarly, tuple subclasses tend to have pretty different behavior, and we should # fall back to the default check. elif issubclass(expected_origin_type, Iterable) and not issubclass( - expected_origin_type, (str, bytes, Tuple) + expected_origin_type, + (str, bytes, tuple), ): # We know this thing is *some* kind of Iterable, but we want to # allow subclasses. That means we want [1,2,3] to match both @@ -143,7 +143,7 @@ def is_value_of_type( # noqa: C901 "too complex" if not issubclass(type(value), expected_origin_type): return False - type_args = get_args(expected_type, evaluate=True) + type_args = get_args(expected_type) if len(type_args) == 0: # `Iterable` (no subscript) is implicitly `Iterable[Any]`. return True diff --git a/libcst/_typed_visitor.py b/libcst/_typed_visitor.py index bbc10d55..8816f619 100644 --- a/libcst/_typed_visitor.py +++ b/libcst/_typed_visitor.py @@ -1,12 +1,13 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # This file was generated by libcst.codegen.gen_matcher_classes -from typing import TYPE_CHECKING, Optional, Union +from typing import Optional, TYPE_CHECKING, Union +from libcst._flatten_sentinel import FlattenSentinel from libcst._maybe_sentinel import MaybeSentinel from libcst._removal_sentinel import RemovalSentinel from libcst._typed_visitor_base import mark_no_op @@ -24,6 +25,7 @@ if TYPE_CHECKING: BaseExpression, BaseFormattedStringContent, BaseSlice, + BaseTemplatedStringContent, BinaryOperation, BooleanOperation, Call, @@ -70,6 +72,9 @@ if TYPE_CHECKING: StarredElement, Subscript, SubscriptElement, + TemplatedString, + TemplatedStringExpression, + TemplatedStringText, Tuple, UnaryOperation, Yield, @@ -147,6 +152,7 @@ if TYPE_CHECKING: Del, Else, ExceptHandler, + ExceptStarHandler, Expr, Finally, For, @@ -157,14 +163,38 @@ if TYPE_CHECKING: ImportAlias, ImportFrom, IndentedBlock, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchKeywordElement, + MatchList, + MatchMapping, + MatchMappingElement, + MatchOr, + MatchOrElement, + MatchPattern, + MatchSequence, + MatchSequenceElement, + MatchSingleton, + MatchStar, + MatchTuple, + MatchValue, NameItem, Nonlocal, + ParamSpec, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, + TryStar, + TypeAlias, + TypeParam, + TypeParameters, + TypeVar, + TypeVarTuple, While, With, WithItem, @@ -181,6 +211,7 @@ if TYPE_CHECKING: class CSTTypedBaseFunctions: + @mark_no_op def visit_Add(self, node: "Add") -> Optional[bool]: pass @@ -1033,6 +1064,22 @@ class CSTTypedBaseFunctions: def leave_ClassDef_whitespace_before_colon(self, node: "ClassDef") -> None: pass + @mark_no_op + def visit_ClassDef_type_parameters(self, node: "ClassDef") -> None: + pass + + @mark_no_op + def leave_ClassDef_type_parameters(self, node: "ClassDef") -> None: + pass + + @mark_no_op + def 
visit_ClassDef_whitespace_after_type_parameters(self, node: "ClassDef") -> None: + pass + + @mark_no_op + def leave_ClassDef_whitespace_after_type_parameters(self, node: "ClassDef") -> None: + pass + @mark_no_op def visit_Colon(self, node: "Colon") -> Optional[bool]: pass @@ -1777,6 +1824,78 @@ class CSTTypedBaseFunctions: ) -> None: pass + @mark_no_op + def visit_ExceptStarHandler(self, node: "ExceptStarHandler") -> Optional[bool]: + pass + + @mark_no_op + def visit_ExceptStarHandler_body(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_body(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def visit_ExceptStarHandler_type(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_type(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def visit_ExceptStarHandler_name(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_name(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def visit_ExceptStarHandler_leading_lines(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_leading_lines(self, node: "ExceptStarHandler") -> None: + pass + + @mark_no_op + def visit_ExceptStarHandler_whitespace_after_except( + self, node: "ExceptStarHandler" + ) -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_whitespace_after_except( + self, node: "ExceptStarHandler" + ) -> None: + pass + + @mark_no_op + def visit_ExceptStarHandler_whitespace_after_star( + self, node: "ExceptStarHandler" + ) -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_whitespace_after_star( + self, node: "ExceptStarHandler" + ) -> None: + pass + + @mark_no_op + def visit_ExceptStarHandler_whitespace_before_colon( + self, node: "ExceptStarHandler" + ) -> None: + pass + + @mark_no_op + def leave_ExceptStarHandler_whitespace_before_colon( + self, node: "ExceptStarHandler" + ) -> 
None: + pass + @mark_no_op def visit_Expr(self, node: "Expr") -> Optional[bool]: pass @@ -2247,6 +2366,26 @@ class CSTTypedBaseFunctions: def leave_FunctionDef_whitespace_before_colon(self, node: "FunctionDef") -> None: pass + @mark_no_op + def visit_FunctionDef_type_parameters(self, node: "FunctionDef") -> None: + pass + + @mark_no_op + def leave_FunctionDef_type_parameters(self, node: "FunctionDef") -> None: + pass + + @mark_no_op + def visit_FunctionDef_whitespace_after_type_parameters( + self, node: "FunctionDef" + ) -> None: + pass + + @mark_no_op + def leave_FunctionDef_whitespace_after_type_parameters( + self, node: "FunctionDef" + ) -> None: + pass + @mark_no_op def visit_GeneratorExp(self, node: "GeneratorExp") -> Optional[bool]: pass @@ -2715,6 +2854,22 @@ class CSTTypedBaseFunctions: def leave_Index_value(self, node: "Index") -> None: pass + @mark_no_op + def visit_Index_star(self, node: "Index") -> None: + pass + + @mark_no_op + def leave_Index_star(self, node: "Index") -> None: + pass + + @mark_no_op + def visit_Index_whitespace_after_star(self, node: "Index") -> None: + pass + + @mark_no_op + def leave_Index_whitespace_after_star(self, node: "Index") -> None: + pass + @mark_no_op def visit_Integer(self, node: "Integer") -> Optional[bool]: pass @@ -3059,6 +3214,636 @@ class CSTTypedBaseFunctions: def leave_ListComp_rpar(self, node: "ListComp") -> None: pass + @mark_no_op + def visit_Match(self, node: "Match") -> Optional[bool]: + pass + + @mark_no_op + def visit_Match_subject(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_subject(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_Match_cases(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_cases(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_Match_leading_lines(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_leading_lines(self, node: "Match") -> None: + pass + + @mark_no_op + def 
visit_Match_whitespace_after_match(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_whitespace_after_match(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_Match_whitespace_before_colon(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_whitespace_before_colon(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_Match_whitespace_after_colon(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_whitespace_after_colon(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_Match_indent(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_indent(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_Match_footer(self, node: "Match") -> None: + pass + + @mark_no_op + def leave_Match_footer(self, node: "Match") -> None: + pass + + @mark_no_op + def visit_MatchAs(self, node: "MatchAs") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchAs_pattern(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchAs_pattern(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def visit_MatchAs_name(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchAs_name(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def visit_MatchAs_whitespace_before_as(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchAs_whitespace_before_as(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def visit_MatchAs_whitespace_after_as(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchAs_whitespace_after_as(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def visit_MatchAs_lpar(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchAs_lpar(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def visit_MatchAs_rpar(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchAs_rpar(self, node: "MatchAs") -> None: + pass + + @mark_no_op + def 
visit_MatchCase(self, node: "MatchCase") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchCase_pattern(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_pattern(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_body(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_body(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_guard(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_guard(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_leading_lines(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_leading_lines(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_whitespace_after_case(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_whitespace_after_case(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_whitespace_before_if(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_whitespace_before_if(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_whitespace_after_if(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_whitespace_after_if(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchCase_whitespace_before_colon(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchCase_whitespace_before_colon(self, node: "MatchCase") -> None: + pass + + @mark_no_op + def visit_MatchClass(self, node: "MatchClass") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchClass_cls(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_cls(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchClass_patterns(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_patterns(self, node: "MatchClass") -> 
None: + pass + + @mark_no_op + def visit_MatchClass_kwds(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_kwds(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchClass_whitespace_after_cls(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_whitespace_after_cls(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchClass_whitespace_before_patterns(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_whitespace_before_patterns(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchClass_whitespace_after_kwds(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_whitespace_after_kwds(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchClass_lpar(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_lpar(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchClass_rpar(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchClass_rpar(self, node: "MatchClass") -> None: + pass + + @mark_no_op + def visit_MatchKeywordElement(self, node: "MatchKeywordElement") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchKeywordElement_key(self, node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def leave_MatchKeywordElement_key(self, node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def visit_MatchKeywordElement_pattern(self, node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def leave_MatchKeywordElement_pattern(self, node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def visit_MatchKeywordElement_comma(self, node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def leave_MatchKeywordElement_comma(self, node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def visit_MatchKeywordElement_whitespace_before_equal( + self, node: 
"MatchKeywordElement" + ) -> None: + pass + + @mark_no_op + def leave_MatchKeywordElement_whitespace_before_equal( + self, node: "MatchKeywordElement" + ) -> None: + pass + + @mark_no_op + def visit_MatchKeywordElement_whitespace_after_equal( + self, node: "MatchKeywordElement" + ) -> None: + pass + + @mark_no_op + def leave_MatchKeywordElement_whitespace_after_equal( + self, node: "MatchKeywordElement" + ) -> None: + pass + + @mark_no_op + def visit_MatchList(self, node: "MatchList") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchList_patterns(self, node: "MatchList") -> None: + pass + + @mark_no_op + def leave_MatchList_patterns(self, node: "MatchList") -> None: + pass + + @mark_no_op + def visit_MatchList_lbracket(self, node: "MatchList") -> None: + pass + + @mark_no_op + def leave_MatchList_lbracket(self, node: "MatchList") -> None: + pass + + @mark_no_op + def visit_MatchList_rbracket(self, node: "MatchList") -> None: + pass + + @mark_no_op + def leave_MatchList_rbracket(self, node: "MatchList") -> None: + pass + + @mark_no_op + def visit_MatchList_lpar(self, node: "MatchList") -> None: + pass + + @mark_no_op + def leave_MatchList_lpar(self, node: "MatchList") -> None: + pass + + @mark_no_op + def visit_MatchList_rpar(self, node: "MatchList") -> None: + pass + + @mark_no_op + def leave_MatchList_rpar(self, node: "MatchList") -> None: + pass + + @mark_no_op + def visit_MatchMapping(self, node: "MatchMapping") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchMapping_elements(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_elements(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_lbrace(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_lbrace(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_rbrace(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_rbrace(self, node: 
"MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_rest(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_rest(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_whitespace_before_rest(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_whitespace_before_rest(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_trailing_comma(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_trailing_comma(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_lpar(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_lpar(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMapping_rpar(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMapping_rpar(self, node: "MatchMapping") -> None: + pass + + @mark_no_op + def visit_MatchMappingElement(self, node: "MatchMappingElement") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchMappingElement_key(self, node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def leave_MatchMappingElement_key(self, node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def visit_MatchMappingElement_pattern(self, node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def leave_MatchMappingElement_pattern(self, node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def visit_MatchMappingElement_comma(self, node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def leave_MatchMappingElement_comma(self, node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def visit_MatchMappingElement_whitespace_before_colon( + self, node: "MatchMappingElement" + ) -> None: + pass + + @mark_no_op + def leave_MatchMappingElement_whitespace_before_colon( + self, node: "MatchMappingElement" + ) -> None: + pass + + 
@mark_no_op + def visit_MatchMappingElement_whitespace_after_colon( + self, node: "MatchMappingElement" + ) -> None: + pass + + @mark_no_op + def leave_MatchMappingElement_whitespace_after_colon( + self, node: "MatchMappingElement" + ) -> None: + pass + + @mark_no_op + def visit_MatchOr(self, node: "MatchOr") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchOr_patterns(self, node: "MatchOr") -> None: + pass + + @mark_no_op + def leave_MatchOr_patterns(self, node: "MatchOr") -> None: + pass + + @mark_no_op + def visit_MatchOr_lpar(self, node: "MatchOr") -> None: + pass + + @mark_no_op + def leave_MatchOr_lpar(self, node: "MatchOr") -> None: + pass + + @mark_no_op + def visit_MatchOr_rpar(self, node: "MatchOr") -> None: + pass + + @mark_no_op + def leave_MatchOr_rpar(self, node: "MatchOr") -> None: + pass + + @mark_no_op + def visit_MatchOrElement(self, node: "MatchOrElement") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchOrElement_pattern(self, node: "MatchOrElement") -> None: + pass + + @mark_no_op + def leave_MatchOrElement_pattern(self, node: "MatchOrElement") -> None: + pass + + @mark_no_op + def visit_MatchOrElement_separator(self, node: "MatchOrElement") -> None: + pass + + @mark_no_op + def leave_MatchOrElement_separator(self, node: "MatchOrElement") -> None: + pass + + @mark_no_op + def visit_MatchPattern(self, node: "MatchPattern") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchSequence(self, node: "MatchSequence") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchSequenceElement( + self, node: "MatchSequenceElement" + ) -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchSequenceElement_value(self, node: "MatchSequenceElement") -> None: + pass + + @mark_no_op + def leave_MatchSequenceElement_value(self, node: "MatchSequenceElement") -> None: + pass + + @mark_no_op + def visit_MatchSequenceElement_comma(self, node: "MatchSequenceElement") -> None: + pass + + @mark_no_op + def 
leave_MatchSequenceElement_comma(self, node: "MatchSequenceElement") -> None: + pass + + @mark_no_op + def visit_MatchSingleton(self, node: "MatchSingleton") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchSingleton_value(self, node: "MatchSingleton") -> None: + pass + + @mark_no_op + def leave_MatchSingleton_value(self, node: "MatchSingleton") -> None: + pass + + @mark_no_op + def visit_MatchStar(self, node: "MatchStar") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchStar_name(self, node: "MatchStar") -> None: + pass + + @mark_no_op + def leave_MatchStar_name(self, node: "MatchStar") -> None: + pass + + @mark_no_op + def visit_MatchStar_comma(self, node: "MatchStar") -> None: + pass + + @mark_no_op + def leave_MatchStar_comma(self, node: "MatchStar") -> None: + pass + + @mark_no_op + def visit_MatchStar_whitespace_before_name(self, node: "MatchStar") -> None: + pass + + @mark_no_op + def leave_MatchStar_whitespace_before_name(self, node: "MatchStar") -> None: + pass + + @mark_no_op + def visit_MatchTuple(self, node: "MatchTuple") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchTuple_patterns(self, node: "MatchTuple") -> None: + pass + + @mark_no_op + def leave_MatchTuple_patterns(self, node: "MatchTuple") -> None: + pass + + @mark_no_op + def visit_MatchTuple_lpar(self, node: "MatchTuple") -> None: + pass + + @mark_no_op + def leave_MatchTuple_lpar(self, node: "MatchTuple") -> None: + pass + + @mark_no_op + def visit_MatchTuple_rpar(self, node: "MatchTuple") -> None: + pass + + @mark_no_op + def leave_MatchTuple_rpar(self, node: "MatchTuple") -> None: + pass + + @mark_no_op + def visit_MatchValue(self, node: "MatchValue") -> Optional[bool]: + pass + + @mark_no_op + def visit_MatchValue_value(self, node: "MatchValue") -> None: + pass + + @mark_no_op + def leave_MatchValue_value(self, node: "MatchValue") -> None: + pass + @mark_no_op def visit_MatrixMultiply(self, node: "MatrixMultiply") -> Optional[bool]: pass @@ -3569,6 +4354,34 
@@ class CSTTypedBaseFunctions: def leave_ParamSlash_comma(self, node: "ParamSlash") -> None: pass + @mark_no_op + def visit_ParamSlash_whitespace_after(self, node: "ParamSlash") -> None: + pass + + @mark_no_op + def leave_ParamSlash_whitespace_after(self, node: "ParamSlash") -> None: + pass + + @mark_no_op + def visit_ParamSpec(self, node: "ParamSpec") -> Optional[bool]: + pass + + @mark_no_op + def visit_ParamSpec_name(self, node: "ParamSpec") -> None: + pass + + @mark_no_op + def leave_ParamSpec_name(self, node: "ParamSpec") -> None: + pass + + @mark_no_op + def visit_ParamSpec_whitespace_after_star(self, node: "ParamSpec") -> None: + pass + + @mark_no_op + def leave_ParamSpec_whitespace_after_star(self, node: "ParamSpec") -> None: + pass + @mark_no_op def visit_ParamStar(self, node: "ParamStar") -> Optional[bool]: pass @@ -4373,6 +5186,140 @@ class CSTTypedBaseFunctions: def leave_SubtractAssign_whitespace_after(self, node: "SubtractAssign") -> None: pass + @mark_no_op + def visit_TemplatedString(self, node: "TemplatedString") -> Optional[bool]: + pass + + @mark_no_op + def visit_TemplatedString_parts(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def leave_TemplatedString_parts(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def visit_TemplatedString_start(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def leave_TemplatedString_start(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def visit_TemplatedString_end(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def leave_TemplatedString_end(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def visit_TemplatedString_lpar(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def leave_TemplatedString_lpar(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def visit_TemplatedString_rpar(self, node: "TemplatedString") -> None: + pass + + @mark_no_op + def leave_TemplatedString_rpar(self, node: 
"TemplatedString") -> None: + pass + + @mark_no_op + def visit_TemplatedStringExpression( + self, node: "TemplatedStringExpression" + ) -> Optional[bool]: + pass + + @mark_no_op + def visit_TemplatedStringExpression_expression( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression_expression( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def visit_TemplatedStringExpression_conversion( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression_conversion( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def visit_TemplatedStringExpression_format_spec( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression_format_spec( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def visit_TemplatedStringExpression_whitespace_before_expression( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression_whitespace_before_expression( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def visit_TemplatedStringExpression_whitespace_after_expression( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression_whitespace_after_expression( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def visit_TemplatedStringExpression_equal( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression_equal( + self, node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def visit_TemplatedStringText(self, node: "TemplatedStringText") -> Optional[bool]: + pass + + @mark_no_op + def visit_TemplatedStringText_value(self, node: "TemplatedStringText") -> None: + pass + + @mark_no_op + def 
leave_TemplatedStringText_value(self, node: "TemplatedStringText") -> None: + pass + @mark_no_op def visit_TrailingWhitespace(self, node: "TrailingWhitespace") -> Optional[bool]: pass @@ -4453,6 +5400,58 @@ class CSTTypedBaseFunctions: def leave_Try_whitespace_before_colon(self, node: "Try") -> None: pass + @mark_no_op + def visit_TryStar(self, node: "TryStar") -> Optional[bool]: + pass + + @mark_no_op + def visit_TryStar_body(self, node: "TryStar") -> None: + pass + + @mark_no_op + def leave_TryStar_body(self, node: "TryStar") -> None: + pass + + @mark_no_op + def visit_TryStar_handlers(self, node: "TryStar") -> None: + pass + + @mark_no_op + def leave_TryStar_handlers(self, node: "TryStar") -> None: + pass + + @mark_no_op + def visit_TryStar_orelse(self, node: "TryStar") -> None: + pass + + @mark_no_op + def leave_TryStar_orelse(self, node: "TryStar") -> None: + pass + + @mark_no_op + def visit_TryStar_finalbody(self, node: "TryStar") -> None: + pass + + @mark_no_op + def leave_TryStar_finalbody(self, node: "TryStar") -> None: + pass + + @mark_no_op + def visit_TryStar_leading_lines(self, node: "TryStar") -> None: + pass + + @mark_no_op + def leave_TryStar_leading_lines(self, node: "TryStar") -> None: + pass + + @mark_no_op + def visit_TryStar_whitespace_before_colon(self, node: "TryStar") -> None: + pass + + @mark_no_op + def leave_TryStar_whitespace_before_colon(self, node: "TryStar") -> None: + pass + @mark_no_op def visit_Tuple(self, node: "Tuple") -> Optional[bool]: pass @@ -4481,6 +5480,206 @@ class CSTTypedBaseFunctions: def leave_Tuple_rpar(self, node: "Tuple") -> None: pass + @mark_no_op + def visit_TypeAlias(self, node: "TypeAlias") -> Optional[bool]: + pass + + @mark_no_op + def visit_TypeAlias_name(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeAlias_name(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeAlias_value(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def 
leave_TypeAlias_value(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeAlias_type_parameters(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeAlias_type_parameters(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeAlias_whitespace_after_type(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeAlias_whitespace_after_type(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeAlias_whitespace_after_name(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeAlias_whitespace_after_name(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeAlias_whitespace_after_type_parameters( + self, node: "TypeAlias" + ) -> None: + pass + + @mark_no_op + def leave_TypeAlias_whitespace_after_type_parameters( + self, node: "TypeAlias" + ) -> None: + pass + + @mark_no_op + def visit_TypeAlias_whitespace_after_equals(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeAlias_whitespace_after_equals(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeAlias_semicolon(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeAlias_semicolon(self, node: "TypeAlias") -> None: + pass + + @mark_no_op + def visit_TypeParam(self, node: "TypeParam") -> Optional[bool]: + pass + + @mark_no_op + def visit_TypeParam_param(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def leave_TypeParam_param(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def visit_TypeParam_comma(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def leave_TypeParam_comma(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def visit_TypeParam_equal(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def leave_TypeParam_equal(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def visit_TypeParam_star(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def 
leave_TypeParam_star(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def visit_TypeParam_whitespace_after_star(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def leave_TypeParam_whitespace_after_star(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def visit_TypeParam_default(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def leave_TypeParam_default(self, node: "TypeParam") -> None: + pass + + @mark_no_op + def visit_TypeParameters(self, node: "TypeParameters") -> Optional[bool]: + pass + + @mark_no_op + def visit_TypeParameters_params(self, node: "TypeParameters") -> None: + pass + + @mark_no_op + def leave_TypeParameters_params(self, node: "TypeParameters") -> None: + pass + + @mark_no_op + def visit_TypeParameters_lbracket(self, node: "TypeParameters") -> None: + pass + + @mark_no_op + def leave_TypeParameters_lbracket(self, node: "TypeParameters") -> None: + pass + + @mark_no_op + def visit_TypeParameters_rbracket(self, node: "TypeParameters") -> None: + pass + + @mark_no_op + def leave_TypeParameters_rbracket(self, node: "TypeParameters") -> None: + pass + + @mark_no_op + def visit_TypeVar(self, node: "TypeVar") -> Optional[bool]: + pass + + @mark_no_op + def visit_TypeVar_name(self, node: "TypeVar") -> None: + pass + + @mark_no_op + def leave_TypeVar_name(self, node: "TypeVar") -> None: + pass + + @mark_no_op + def visit_TypeVar_bound(self, node: "TypeVar") -> None: + pass + + @mark_no_op + def leave_TypeVar_bound(self, node: "TypeVar") -> None: + pass + + @mark_no_op + def visit_TypeVar_colon(self, node: "TypeVar") -> None: + pass + + @mark_no_op + def leave_TypeVar_colon(self, node: "TypeVar") -> None: + pass + + @mark_no_op + def visit_TypeVarTuple(self, node: "TypeVarTuple") -> Optional[bool]: + pass + + @mark_no_op + def visit_TypeVarTuple_name(self, node: "TypeVarTuple") -> None: + pass + + @mark_no_op + def leave_TypeVarTuple_name(self, node: "TypeVarTuple") -> None: + pass + + @mark_no_op + def 
visit_TypeVarTuple_whitespace_after_star(self, node: "TypeVarTuple") -> None: + pass + + @mark_no_op + def leave_TypeVarTuple_whitespace_after_star(self, node: "TypeVarTuple") -> None: + pass + @mark_no_op def visit_UnaryOperation(self, node: "UnaryOperation") -> Optional[bool]: pass @@ -4605,6 +5804,22 @@ class CSTTypedBaseFunctions: def leave_With_leading_lines(self, node: "With") -> None: pass + @mark_no_op + def visit_With_lpar(self, node: "With") -> None: + pass + + @mark_no_op + def leave_With_lpar(self, node: "With") -> None: + pass + + @mark_no_op + def visit_With_rpar(self, node: "With") -> None: + pass + + @mark_no_op + def leave_With_rpar(self, node: "With") -> None: + pass + @mark_no_op def visit_With_whitespace_after_with(self, node: "With") -> None: pass @@ -4687,6 +5902,7 @@ class CSTTypedBaseFunctions: class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): + @mark_no_op def leave_Add(self, original_node: "Add") -> None: pass @@ -4887,6 +6103,10 @@ class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): def leave_ExceptHandler(self, original_node: "ExceptHandler") -> None: pass + @mark_no_op + def leave_ExceptStarHandler(self, original_node: "ExceptStarHandler") -> None: + pass + @mark_no_op def leave_Expr(self, original_node: "Expr") -> None: pass @@ -5041,6 +6261,74 @@ class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): def leave_ListComp(self, original_node: "ListComp") -> None: pass + @mark_no_op + def leave_Match(self, original_node: "Match") -> None: + pass + + @mark_no_op + def leave_MatchAs(self, original_node: "MatchAs") -> None: + pass + + @mark_no_op + def leave_MatchCase(self, original_node: "MatchCase") -> None: + pass + + @mark_no_op + def leave_MatchClass(self, original_node: "MatchClass") -> None: + pass + + @mark_no_op + def leave_MatchKeywordElement(self, original_node: "MatchKeywordElement") -> None: + pass + + @mark_no_op + def leave_MatchList(self, original_node: "MatchList") -> None: + pass + + @mark_no_op + def 
leave_MatchMapping(self, original_node: "MatchMapping") -> None: + pass + + @mark_no_op + def leave_MatchMappingElement(self, original_node: "MatchMappingElement") -> None: + pass + + @mark_no_op + def leave_MatchOr(self, original_node: "MatchOr") -> None: + pass + + @mark_no_op + def leave_MatchOrElement(self, original_node: "MatchOrElement") -> None: + pass + + @mark_no_op + def leave_MatchPattern(self, original_node: "MatchPattern") -> None: + pass + + @mark_no_op + def leave_MatchSequence(self, original_node: "MatchSequence") -> None: + pass + + @mark_no_op + def leave_MatchSequenceElement(self, original_node: "MatchSequenceElement") -> None: + pass + + @mark_no_op + def leave_MatchSingleton(self, original_node: "MatchSingleton") -> None: + pass + + @mark_no_op + def leave_MatchStar(self, original_node: "MatchStar") -> None: + pass + + @mark_no_op + def leave_MatchTuple(self, original_node: "MatchTuple") -> None: + pass + + @mark_no_op + def leave_MatchValue(self, original_node: "MatchValue") -> None: + pass + @mark_no_op def leave_MatrixMultiply(self, original_node: "MatrixMultiply") -> None: pass @@ -5117,6 +6405,10 @@ class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): def leave_ParamSlash(self, original_node: "ParamSlash") -> None: pass + @mark_no_op + def leave_ParamSpec(self, original_node: "ParamSpec") -> None: + pass + @mark_no_op def leave_ParamStar(self, original_node: "ParamStar") -> None: pass @@ -5231,6 +6523,20 @@ class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): def leave_SubtractAssign(self, original_node: "SubtractAssign") -> None: pass + @mark_no_op + def leave_TemplatedString(self, original_node: "TemplatedString") -> None: + pass + + @mark_no_op + def leave_TemplatedStringExpression( + self, original_node: "TemplatedStringExpression" + ) -> None: + pass + + @mark_no_op + def leave_TemplatedStringText(self, original_node: "TemplatedStringText") -> None: + pass + @mark_no_op def leave_TrailingWhitespace(self, original_node: 
"TrailingWhitespace") -> None: pass @@ -5239,10 +6545,34 @@ class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): def leave_Try(self, original_node: "Try") -> None: pass + @mark_no_op + def leave_TryStar(self, original_node: "TryStar") -> None: + pass + @mark_no_op def leave_Tuple(self, original_node: "Tuple") -> None: pass + @mark_no_op + def leave_TypeAlias(self, original_node: "TypeAlias") -> None: + pass + + @mark_no_op + def leave_TypeParam(self, original_node: "TypeParam") -> None: + pass + + @mark_no_op + def leave_TypeParameters(self, original_node: "TypeParameters") -> None: + pass + + @mark_no_op + def leave_TypeVar(self, original_node: "TypeVar") -> None: + pass + + @mark_no_op + def leave_TypeVarTuple(self, original_node: "TypeVarTuple") -> None: + pass + @mark_no_op def leave_UnaryOperation(self, original_node: "UnaryOperation") -> None: pass @@ -5265,7 +6595,6 @@ class CSTTypedVisitorFunctions(CSTTypedBaseFunctions): class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): - pass @mark_no_op def leave_Add(self, original_node: "Add", updated_node: "Add") -> "BaseBinaryOp": @@ -5284,7 +6613,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_AnnAssign( self, original_node: "AnnAssign", updated_node: "AnnAssign" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5296,7 +6627,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Arg( self, original_node: "Arg", updated_node: "Arg" - ) -> Union["Arg", RemovalSentinel]: + ) -> Union["Arg", FlattenSentinel["Arg"], RemovalSentinel]: return updated_node @mark_no_op @@ -5306,13 +6637,17 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Assert( self, original_node: "Assert", updated_node: "Assert" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + 
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op def leave_Assign( self, original_node: "Assign", updated_node: "Assign" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5324,7 +6659,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_AssignTarget( self, original_node: "AssignTarget", updated_node: "AssignTarget" - ) -> Union["AssignTarget", RemovalSentinel]: + ) -> Union["AssignTarget", FlattenSentinel["AssignTarget"], RemovalSentinel]: return updated_node @mark_no_op @@ -5342,7 +6677,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_AugAssign( self, original_node: "AugAssign", updated_node: "AugAssign" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5378,7 +6715,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_BitOr( self, original_node: "BitOr", updated_node: "BitOr" - ) -> "BaseBinaryOp": + ) -> Union["BaseBinaryOp", MaybeSentinel]: return updated_node @mark_no_op @@ -5408,7 +6745,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Break( self, original_node: "Break", updated_node: "Break" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5420,7 +6759,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_ClassDef( self, original_node: "ClassDef", updated_node: "ClassDef" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node 
@mark_no_op @@ -5460,7 +6799,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_ComparisonTarget( self, original_node: "ComparisonTarget", updated_node: "ComparisonTarget" - ) -> Union["ComparisonTarget", RemovalSentinel]: + ) -> Union[ + "ComparisonTarget", FlattenSentinel["ComparisonTarget"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5472,19 +6813,23 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Continue( self, original_node: "Continue", updated_node: "Continue" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op def leave_Decorator( self, original_node: "Decorator", updated_node: "Decorator" - ) -> Union["Decorator", RemovalSentinel]: + ) -> Union["Decorator", FlattenSentinel["Decorator"], RemovalSentinel]: return updated_node @mark_no_op def leave_Del( self, original_node: "Del", updated_node: "Del" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5502,7 +6847,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_DictElement( self, original_node: "DictElement", updated_node: "DictElement" - ) -> Union["BaseDictElement", RemovalSentinel]: + ) -> Union["BaseDictElement", FlattenSentinel["BaseDictElement"], RemovalSentinel]: return updated_node @mark_no_op @@ -5520,13 +6865,13 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Dot( self, original_node: "Dot", updated_node: "Dot" - ) -> Union["Dot", RemovalSentinel]: + ) -> Union["Dot", FlattenSentinel["Dot"], RemovalSentinel]: return updated_node @mark_no_op def leave_Element( self, original_node: "Element", updated_node: "Element" - ) -> Union["BaseElement", RemovalSentinel]: + ) -> 
Union["BaseElement", FlattenSentinel["BaseElement"], RemovalSentinel]: return updated_node @mark_no_op @@ -5542,7 +6887,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_EmptyLine( self, original_node: "EmptyLine", updated_node: "EmptyLine" - ) -> Union["EmptyLine", RemovalSentinel]: + ) -> Union["EmptyLine", FlattenSentinel["EmptyLine"], RemovalSentinel]: return updated_node @mark_no_op @@ -5554,13 +6899,23 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_ExceptHandler( self, original_node: "ExceptHandler", updated_node: "ExceptHandler" - ) -> Union["ExceptHandler", RemovalSentinel]: + ) -> Union["ExceptHandler", FlattenSentinel["ExceptHandler"], RemovalSentinel]: + return updated_node + + @mark_no_op + def leave_ExceptStarHandler( + self, original_node: "ExceptStarHandler", updated_node: "ExceptStarHandler" + ) -> Union[ + "ExceptStarHandler", FlattenSentinel["ExceptStarHandler"], RemovalSentinel + ]: return updated_node @mark_no_op def leave_Expr( self, original_node: "Expr", updated_node: "Expr" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5590,7 +6945,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_For( self, original_node: "For", updated_node: "For" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op @@ -5604,13 +6959,21 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): self, original_node: "FormattedStringExpression", updated_node: "FormattedStringExpression", - ) -> Union["BaseFormattedStringContent", RemovalSentinel]: + ) -> Union[ + "BaseFormattedStringContent", + FlattenSentinel["BaseFormattedStringContent"], + RemovalSentinel, + ]: return updated_node @mark_no_op def 
leave_FormattedStringText( self, original_node: "FormattedStringText", updated_node: "FormattedStringText" - ) -> Union["BaseFormattedStringContent", RemovalSentinel]: + ) -> Union[ + "BaseFormattedStringContent", + FlattenSentinel["BaseFormattedStringContent"], + RemovalSentinel, + ]: return updated_node @mark_no_op @@ -5620,7 +6983,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_FunctionDef( self, original_node: "FunctionDef", updated_node: "FunctionDef" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op @@ -5632,7 +6995,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Global( self, original_node: "Global", updated_node: "Global" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5650,7 +7015,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_If( self, original_node: "If", updated_node: "If" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op @@ -5668,19 +7033,23 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Import( self, original_node: "Import", updated_node: "Import" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op def leave_ImportAlias( self, original_node: "ImportAlias", updated_node: "ImportAlias" - ) -> Union["ImportAlias", RemovalSentinel]: + ) -> Union["ImportAlias", FlattenSentinel["ImportAlias"], RemovalSentinel]: return updated_node @mark_no_op def leave_ImportFrom( self, original_node: "ImportFrom", updated_node: 
"ImportFrom" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5734,7 +7103,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_LeftParen( self, original_node: "LeftParen", updated_node: "LeftParen" - ) -> Union["LeftParen", MaybeSentinel, RemovalSentinel]: + ) -> Union[ + "LeftParen", MaybeSentinel, FlattenSentinel["LeftParen"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5779,6 +7150,116 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): ) -> "BaseExpression": return updated_node + @mark_no_op + def leave_Match( + self, original_node: "Match", updated_node: "Match" + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: + return updated_node + + @mark_no_op + def leave_MatchAs( + self, original_node: "MatchAs", updated_node: "MatchAs" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchCase( + self, original_node: "MatchCase", updated_node: "MatchCase" + ) -> "MatchCase": + return updated_node + + @mark_no_op + def leave_MatchClass( + self, original_node: "MatchClass", updated_node: "MatchClass" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchKeywordElement( + self, original_node: "MatchKeywordElement", updated_node: "MatchKeywordElement" + ) -> Union[ + "MatchKeywordElement", FlattenSentinel["MatchKeywordElement"], RemovalSentinel + ]: + return updated_node + + @mark_no_op + def leave_MatchList( + self, original_node: "MatchList", updated_node: "MatchList" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchMapping( + self, original_node: "MatchMapping", updated_node: "MatchMapping" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchMappingElement( + self, original_node: "MatchMappingElement", updated_node: "MatchMappingElement" + ) -> 
Union[ + "MatchMappingElement", FlattenSentinel["MatchMappingElement"], RemovalSentinel + ]: + return updated_node + + @mark_no_op + def leave_MatchOr( + self, original_node: "MatchOr", updated_node: "MatchOr" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchOrElement( + self, original_node: "MatchOrElement", updated_node: "MatchOrElement" + ) -> Union["MatchOrElement", FlattenSentinel["MatchOrElement"], RemovalSentinel]: + return updated_node + + @mark_no_op + def leave_MatchPattern( + self, original_node: "MatchPattern", updated_node: "MatchPattern" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchSequence( + self, original_node: "MatchSequence", updated_node: "MatchSequence" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchSequenceElement( + self, + original_node: "MatchSequenceElement", + updated_node: "MatchSequenceElement", + ) -> Union[ + "MatchSequenceElement", FlattenSentinel["MatchSequenceElement"], RemovalSentinel + ]: + return updated_node + + @mark_no_op + def leave_MatchSingleton( + self, original_node: "MatchSingleton", updated_node: "MatchSingleton" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchStar( + self, original_node: "MatchStar", updated_node: "MatchStar" + ) -> "MatchStar": + return updated_node + + @mark_no_op + def leave_MatchTuple( + self, original_node: "MatchTuple", updated_node: "MatchTuple" + ) -> "MatchPattern": + return updated_node + + @mark_no_op + def leave_MatchValue( + self, original_node: "MatchValue", updated_node: "MatchValue" + ) -> "MatchPattern": + return updated_node + @mark_no_op def leave_MatrixMultiply( self, original_node: "MatrixMultiply", updated_node: "MatrixMultiply" @@ -5836,7 +7317,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_NameItem( self, original_node: "NameItem", updated_node: "NameItem" - ) -> Union["NameItem", RemovalSentinel]: + ) -> 
Union["NameItem", FlattenSentinel["NameItem"], RemovalSentinel]: return updated_node @mark_no_op @@ -5854,7 +7335,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Nonlocal( self, original_node: "Nonlocal", updated_node: "Nonlocal" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5880,7 +7363,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Param( self, original_node: "Param", updated_node: "Param" - ) -> Union["Param", MaybeSentinel, RemovalSentinel]: + ) -> Union["Param", MaybeSentinel, FlattenSentinel["Param"], RemovalSentinel]: return updated_node @mark_no_op @@ -5889,6 +7372,12 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): ) -> Union["ParamSlash", MaybeSentinel]: return updated_node + @mark_no_op + def leave_ParamSpec( + self, original_node: "ParamSpec", updated_node: "ParamSpec" + ) -> "ParamSpec": + return updated_node + @mark_no_op def leave_ParamStar( self, original_node: "ParamStar", updated_node: "ParamStar" @@ -5912,7 +7401,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Pass( self, original_node: "Pass", updated_node: "Pass" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5934,13 +7425,17 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Raise( self, original_node: "Raise", updated_node: "Raise" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op def leave_Return( self, original_node: "Return", updated_node: "Return" - ) -> Union["BaseSmallStatement", RemovalSentinel]: + ) -> Union[ + 
"BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5952,7 +7447,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_RightParen( self, original_node: "RightParen", updated_node: "RightParen" - ) -> Union["RightParen", MaybeSentinel, RemovalSentinel]: + ) -> Union[ + "RightParen", MaybeSentinel, FlattenSentinel["RightParen"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -5992,7 +7489,7 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_SimpleStatementLine( self, original_node: "SimpleStatementLine", updated_node: "SimpleStatementLine" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op @@ -6022,13 +7519,13 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_StarredDictElement( self, original_node: "StarredDictElement", updated_node: "StarredDictElement" - ) -> Union["BaseDictElement", RemovalSentinel]: + ) -> Union["BaseDictElement", FlattenSentinel["BaseDictElement"], RemovalSentinel]: return updated_node @mark_no_op def leave_StarredElement( self, original_node: "StarredElement", updated_node: "StarredElement" - ) -> Union["BaseElement", RemovalSentinel]: + ) -> "BaseExpression": return updated_node @mark_no_op @@ -6040,7 +7537,9 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_SubscriptElement( self, original_node: "SubscriptElement", updated_node: "SubscriptElement" - ) -> Union["SubscriptElement", RemovalSentinel]: + ) -> Union[ + "SubscriptElement", FlattenSentinel["SubscriptElement"], RemovalSentinel + ]: return updated_node @mark_no_op @@ -6055,6 +7554,34 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): ) -> "BaseAugOp": return updated_node + @mark_no_op + def leave_TemplatedString( + self, original_node: "TemplatedString", 
updated_node: "TemplatedString" + ) -> "BaseExpression": + return updated_node + + @mark_no_op + def leave_TemplatedStringExpression( + self, + original_node: "TemplatedStringExpression", + updated_node: "TemplatedStringExpression", + ) -> Union[ + "BaseTemplatedStringContent", + FlattenSentinel["BaseTemplatedStringContent"], + RemovalSentinel, + ]: + return updated_node + + @mark_no_op + def leave_TemplatedStringText( + self, original_node: "TemplatedStringText", updated_node: "TemplatedStringText" + ) -> Union[ + "BaseTemplatedStringContent", + FlattenSentinel["BaseTemplatedStringContent"], + RemovalSentinel, + ]: + return updated_node + @mark_no_op def leave_TrailingWhitespace( self, original_node: "TrailingWhitespace", updated_node: "TrailingWhitespace" @@ -6064,7 +7591,13 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_Try( self, original_node: "Try", updated_node: "Try" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: + return updated_node + + @mark_no_op + def leave_TryStar( + self, original_node: "TryStar", updated_node: "TryStar" + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op @@ -6073,6 +7606,38 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): ) -> "BaseExpression": return updated_node + @mark_no_op + def leave_TypeAlias( + self, original_node: "TypeAlias", updated_node: "TypeAlias" + ) -> Union[ + "BaseSmallStatement", FlattenSentinel["BaseSmallStatement"], RemovalSentinel + ]: + return updated_node + + @mark_no_op + def leave_TypeParam( + self, original_node: "TypeParam", updated_node: "TypeParam" + ) -> Union["TypeParam", FlattenSentinel["TypeParam"], RemovalSentinel]: + return updated_node + + @mark_no_op + def leave_TypeParameters( + self, original_node: "TypeParameters", updated_node: "TypeParameters" + ) -> "TypeParameters": + return updated_node + + 
@mark_no_op + def leave_TypeVar( + self, original_node: "TypeVar", updated_node: "TypeVar" + ) -> "TypeVar": + return updated_node + + @mark_no_op + def leave_TypeVarTuple( + self, original_node: "TypeVarTuple", updated_node: "TypeVarTuple" + ) -> "TypeVarTuple": + return updated_node + @mark_no_op def leave_UnaryOperation( self, original_node: "UnaryOperation", updated_node: "UnaryOperation" @@ -6082,19 +7647,19 @@ class CSTTypedTransformerFunctions(CSTTypedBaseFunctions): @mark_no_op def leave_While( self, original_node: "While", updated_node: "While" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_With( self, original_node: "With", updated_node: "With" - ) -> Union["BaseStatement", RemovalSentinel]: + ) -> Union["BaseStatement", FlattenSentinel["BaseStatement"], RemovalSentinel]: return updated_node @mark_no_op def leave_WithItem( self, original_node: "WithItem", updated_node: "WithItem" - ) -> Union["WithItem", RemovalSentinel]: + ) -> Union["WithItem", FlattenSentinel["WithItem"], RemovalSentinel]: return updated_node @mark_no_op diff --git a/libcst/_typed_visitor_base.py b/libcst/_typed_visitor_base.py index 8525b050..de751a15 100644 --- a/libcst/_typed_visitor_base.py +++ b/libcst/_typed_visitor_base.py @@ -1,14 +1,12 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast +from typing import Any, Callable, cast, TypeVar -if TYPE_CHECKING: - from libcst._typed_visitor import CSTTypedBaseFunctions # noqa: F401 - +# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. 
F = TypeVar("F", bound=Callable) diff --git a/libcst/_types.py b/libcst/_types.py index 98342da8..24055a5c 100644 --- a/libcst/_types.py +++ b/libcst/_types.py @@ -1,14 +1,16 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import TYPE_CHECKING, TypeVar - +from pathlib import PurePath +from typing import TYPE_CHECKING, TypeVar, Union if TYPE_CHECKING: from libcst._nodes.base import CSTNode # noqa: F401 CSTNodeT = TypeVar("CSTNodeT", bound="CSTNode") +CSTNodeT_co = TypeVar("CSTNodeT_co", bound="CSTNode", covariant=True) +StrPath = Union[str, PurePath] diff --git a/libcst/_visitors.py b/libcst/_visitors.py index 1d710ff2..79ea6f40 100644 --- a/libcst/_visitors.py +++ b/libcst/_visitors.py @@ -1,16 +1,16 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import TYPE_CHECKING, Union +from libcst._flatten_sentinel import FlattenSentinel from libcst._metadata_dependent import MetadataDependent from libcst._removal_sentinel import RemovalSentinel from libcst._typed_visitor import CSTTypedTransformerFunctions, CSTTypedVisitorFunctions from libcst._types import CSTNodeT - if TYPE_CHECKING: # Circular dependency for typing reasons only from libcst._nodes.base import CSTNode # noqa: F401 @@ -49,7 +49,7 @@ class CSTTransformer(CSTTypedTransformerFunctions, MetadataDependent): def on_leave( self, original_node: CSTNodeT, updated_node: CSTNodeT - ) -> Union[CSTNodeT, RemovalSentinel]: + ) -> Union[CSTNodeT, RemovalSentinel, FlattenSentinel[CSTNodeT]]: """ Called every time we leave a node, after we've visited its children. 
If the :func:`~libcst.CSTTransformer.on_visit` function for this node returns diff --git a/libcst/codegen/__init__.py b/libcst/codegen/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/codegen/__init__.py +++ b/libcst/codegen/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codegen/gather.py b/libcst/codegen/gather.py index fe309b74..5eeaa7d3 100644 --- a/libcst/codegen/gather.py +++ b/libcst/codegen/gather.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,12 +7,12 @@ import inspect from collections import defaultdict from collections.abc import Sequence as ABCSequence from dataclasses import dataclass, fields, replace -from typing import Dict, Generator, List, Mapping, Sequence, Set, Type, Union +from typing import Dict, Iterator, List, Mapping, Sequence, Set, Type, Union import libcst as cst -def _get_bases() -> Generator[Type[cst.CSTNode], None, None]: +def _get_bases() -> Iterator[Type[cst.CSTNode]]: """ Get all base classes that are subclasses of CSTNode but not an actual node itself. This allows us to keep our types sane by refering to the @@ -27,11 +27,11 @@ def _get_bases() -> Generator[Type[cst.CSTNode], None, None]: typeclasses: Sequence[Type[cst.CSTNode]] = sorted( - list(_get_bases()), key=lambda base: base.__name__ + _get_bases(), key=lambda base: base.__name__ ) -def _get_nodes() -> Generator[Type[cst.CSTNode], None, None]: +def _get_nodes() -> Iterator[Type[cst.CSTNode]]: """ Grab all CSTNodes that are not a superclass. Basically, anything that a person might use to generate a tree. 
@@ -53,7 +53,7 @@ def _get_nodes() -> Generator[Type[cst.CSTNode], None, None]: all_libcst_nodes: Sequence[Type[cst.CSTNode]] = sorted( - list(_get_nodes()), key=lambda node: node.__name__ + _get_nodes(), key=lambda node: node.__name__ ) node_to_bases: Dict[Type[cst.CSTNode], List[Type[cst.CSTNode]]] = {} for node in all_libcst_nodes: diff --git a/libcst/codegen/gen_matcher_classes.py b/libcst/codegen/gen_matcher_classes.py index b0657890..e6def68c 100644 --- a/libcst/codegen/gen_matcher_classes.py +++ b/libcst/codegen/gen_matcher_classes.py @@ -1,17 +1,122 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. +import re from dataclasses import dataclass, fields from typing import Generator, List, Optional, Sequence, Set, Tuple, Type, Union import libcst as cst -from libcst import ensure_type, parse_expression +from libcst import CSTLogicError, ensure_type, parse_expression from libcst.codegen.gather import all_libcst_nodes, typeclasses - CST_DIR: Set[str] = set(dir(cst)) +CLASS_RE = r"" +OPTIONAL_RE = r"typing\.Union\[([^,]*?), NoneType]" + + +class NormalizeUnions(cst.CSTTransformer): + """ + Convert a binary operation with | operators into a Union type. + For example, converts `foo | bar | baz` into `typing.Union[foo, bar, baz]`. + Special case: converts `foo | None` or `None | foo` into `typing.Optional[foo]`. + Also flattens nested typing.Union types. 
+ """ + + def leave_Subscript( + self, original_node: cst.Subscript, updated_node: cst.Subscript + ) -> cst.Subscript: + # Check if this is a typing.Union + if ( + isinstance(updated_node.value, cst.Attribute) + and isinstance(updated_node.value.value, cst.Name) + and updated_node.value.attr.value == "Union" + and updated_node.value.value.value == "typing" + ): + # Collect all operands from any nested Unions + operands: List[cst.BaseExpression] = [] + for slc in updated_node.slice: + if not isinstance(slc.slice, cst.Index): + continue + value = slc.slice.value + # If this is a nested Union, add its elements + if ( + isinstance(value, cst.Subscript) + and isinstance(value.value, cst.Attribute) + and isinstance(value.value.value, cst.Name) + and value.value.attr.value == "Union" + and value.value.value.value == "typing" + ): + operands.extend( + nested_slc.slice.value + for nested_slc in value.slice + if isinstance(nested_slc.slice, cst.Index) + ) + else: + operands.append(value) + + # flatten operands into a Union type + return cst.Subscript( + cst.Attribute(cst.Name("typing"), cst.Name("Union")), + [cst.SubscriptElement(cst.Index(operand)) for operand in operands], + ) + return updated_node + + def leave_BinaryOperation( + self, original_node: cst.BinaryOperation, updated_node: cst.BinaryOperation + ) -> Union[cst.BinaryOperation, cst.Subscript]: + if not updated_node.operator.deep_equals(cst.BitOr()): + return updated_node + + def flatten_binary_op(node: cst.BaseExpression) -> List[cst.BaseExpression]: + """Flatten a binary operation tree into a list of operands.""" + if not isinstance(node, cst.BinaryOperation): + # If it's a Union type, extract its elements + if ( + isinstance(node, cst.Subscript) + and isinstance(node.value, cst.Attribute) + and isinstance(node.value.value, cst.Name) + and node.value.attr.value == "Union" + and node.value.value.value == "typing" + ): + return [ + slc.slice.value + for slc in node.slice + if isinstance(slc.slice, cst.Index) + ] 
+ return [node] + if not node.operator.deep_equals(cst.BitOr()): + return [node] + + left_operands = flatten_binary_op(node.left) + right_operands = flatten_binary_op(node.right) + return left_operands + right_operands + + # Flatten the binary operation tree into a list of operands + operands = flatten_binary_op(updated_node) + + # Check for Optional case (None in union) + none_count = sum( + 1 for op in operands if isinstance(op, cst.Name) and op.value == "None" + ) + if none_count == 1 and len(operands) == 2: + # This is an Optional case - find the non-None operand + non_none = next( + op + for op in operands + if not (isinstance(op, cst.Name) and op.value == "None") + ) + return cst.Subscript( + cst.Attribute(cst.Name("typing"), cst.Name("Optional")), + [cst.SubscriptElement(cst.Index(non_none))], + ) + + # Regular Union case + return cst.Subscript( + cst.Attribute(cst.Name("typing"), cst.Name("Union")), + [cst.SubscriptElement(cst.Index(operand)) for operand in operands], + ) class CleanseFullTypeNames(cst.CSTTransformer): @@ -119,32 +224,14 @@ def _get_match_if_true(oldtype: cst.BaseExpression) -> cst.SubscriptElement: slice=( cst.SubscriptElement( cst.Index( - cst.Subscript( - cst.Name("Callable"), - slice=( - cst.SubscriptElement( - cst.Index( - cst.List( - [ - cst.Element( - # MatchIfTrue takes in the original node type, - # and returns a boolean. So, lets convert our - # quoted classes (forward refs to other - # matchers) back to the CSTNode they refer to. - # We can do this because there's always a 1:1 - # name mapping. - _convert_match_nodes_to_cst_nodes( - oldtype - ) - ) - ] - ) - ) - ), - cst.SubscriptElement(cst.Index(cst.Name("bool"))), - ), - ) - ) + # MatchIfTrue takes in the original node type, + # and returns a boolean. So, lets convert our + # quoted classes (forward refs to other + # matchers) back to the CSTNode they refer to. + # We can do this because there's always a 1:1 + # name mapping. 
+ _convert_match_nodes_to_cst_nodes(oldtype) + ), ), ), ) @@ -196,9 +283,9 @@ class AddWildcardsToSequenceUnions(cst.CSTTransformer): # type blocks, even for sequence types. return if len(node.slice) != 1: - raise Exception( + raise ValueError( "Unexpected number of sequence elements inside Sequence type " - + "annotation!" + "annotation!" ) nodeslice = node.slice[0].slice if isinstance(nodeslice, cst.Index): @@ -281,7 +368,9 @@ def _get_raw_name(node: cst.CSTNode) -> Optional[str]: if isinstance(node, cst.Name): return node.value elif isinstance(node, cst.SimpleString): - return node.evaluated_value + evaluated_value = node.evaluated_value + if isinstance(evaluated_value, str): + return evaluated_value elif isinstance(node, cst.SubscriptElement): return _get_raw_name(node.slice) elif isinstance(node, cst.Index): @@ -360,10 +449,14 @@ def _get_clean_type_from_subscript( if typecst.value.deep_equals(cst.Name("Sequence")): # Lets attempt to widen the sequence type and alias it. if len(typecst.slice) != 1: - raise Exception("Logic error, Sequence shouldn't have more than one param!") + raise CSTLogicError( + "Logic error, Sequence shouldn't have more than one param!" + ) inner_type = typecst.slice[0].slice if not isinstance(inner_type, cst.Index): - raise Exception("Logic error, expecting Index for only Sequence element!") + raise CSTLogicError( + "Logic error, expecting Index for only Sequence element!" + ) inner_type = inner_type.value if isinstance(inner_type, cst.Subscript): @@ -371,7 +464,9 @@ def _get_clean_type_from_subscript( elif isinstance(inner_type, (cst.Name, cst.SimpleString)): clean_inner_type = _get_clean_type_from_expression(aliases, inner_type) else: - raise Exception("Logic error, unexpected type in Sequence!") + raise CSTLogicError( + f"Logic error, unexpected type in Sequence: {type(inner_type)}!" 
+ ) return _get_wrapped_union_type( typecst.deep_replace(inner_type, clean_inner_type), @@ -396,13 +491,16 @@ def _get_clean_type_and_aliases( # First, get the type as a parseable expression. typestr = repr(typeobj) - if typestr.startswith(""): - typestr = typestr[8:-2] + typestr = re.sub(CLASS_RE, r"\1", typestr) + typestr = re.sub(OPTIONAL_RE, r"typing.Optional[\1]", typestr) # Now, parse the expression with LibCST. - cleanser = CleanseFullTypeNames() + typecst = parse_expression(typestr) - typecst = typecst.visit(cleanser) + typecst = typecst.visit(NormalizeUnions()) + assert isinstance(typecst, cst.BaseExpression) + typecst = typecst.visit(CleanseFullTypeNames()) + assert isinstance(typecst, cst.BaseExpression) aliases: List[Alias] = [] # Now, convert the type to allow for MetadataMatchType and MatchIfTrue values. @@ -411,7 +509,7 @@ def _get_clean_type_and_aliases( elif isinstance(typecst, (cst.Name, cst.SimpleString)): clean_type = _get_clean_type_from_expression(aliases, typecst) else: - raise Exception("Logic error, unexpected top level type!") + raise CSTLogicError(f"Logic error, unexpected top level type: {type(typecst)}!") # Now, insert OneOf/AllOf and MatchIfTrue into unions so we can typecheck their usage. # This allows us to put OneOf[SomeType] or MatchIfTrue[cst.SomeType] into any @@ -447,7 +545,7 @@ def _get_fields(node: Type[cst.CSTNode]) -> Generator[Field, None, None]: all_exports: Set[str] = set() generated_code: List[str] = [] -generated_code.append("# Copyright (c) Facebook, Inc. and its affiliates.") +generated_code.append("# Copyright (c) Meta Platforms, Inc. 
and affiliates.") generated_code.append("#") generated_code.append( "# This source code is licensed under the MIT license found in the" @@ -457,8 +555,7 @@ generated_code.append("") generated_code.append("") generated_code.append("# This file was generated by libcst.codegen.gen_matcher_classes") generated_code.append("from dataclasses import dataclass") -generated_code.append("from typing import Callable, Sequence, Union") -generated_code.append("from typing_extensions import Literal") +generated_code.append("from typing import Literal, Optional, Sequence, Union") generated_code.append("import libcst as cst") generated_code.append("") generated_code.append( @@ -563,7 +660,7 @@ for node in all_libcst_nodes: # Make sure to add an __all__ for flake8 and compatibility with "from libcst.matchers import *" -generated_code.append(f"__all__ = {repr(sorted(list(all_exports)))}") +generated_code.append(f"__all__ = {repr(sorted(all_exports))}") if __name__ == "__main__": diff --git a/libcst/codegen/gen_type_mapping.py b/libcst/codegen/gen_type_mapping.py index 2f6b2a9d..cc31783d 100644 --- a/libcst/codegen/gen_type_mapping.py +++ b/libcst/codegen/gen_type_mapping.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,9 +7,8 @@ from typing import List from libcst.codegen.gather import imports, nodebases, nodeuses - generated_code: List[str] = [] -generated_code.append("# Copyright (c) Facebook, Inc. and its affiliates.") +generated_code.append("# Copyright (c) Meta Platforms, Inc. 
and affiliates.") generated_code.append("#") generated_code.append( "# This source code is licensed under the MIT license found in the" @@ -30,7 +29,7 @@ generated_code.append("") generated_code.append("") for module, objects in imports.items(): generated_code.append(f"from {module} import (") - generated_code.append(f" {', '.join(sorted(list(objects)))}") + generated_code.append(f" {', '.join(sorted(objects))}") generated_code.append(")") # Generate the base visit_ methods diff --git a/libcst/codegen/gen_visitor_functions.py b/libcst/codegen/gen_visitor_functions.py index d9a9401b..36d21a5e 100644 --- a/libcst/codegen/gen_visitor_functions.py +++ b/libcst/codegen/gen_visitor_functions.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,9 +8,8 @@ from typing import List from libcst.codegen.gather import imports, nodebases, nodeuses - generated_code: List[str] = [] -generated_code.append("# Copyright (c) Facebook, Inc. and its affiliates.") +generated_code.append("# Copyright (c) Meta Platforms, Inc. 
and affiliates.") generated_code.append("#") generated_code.append( "# This source code is licensed under the MIT license found in the" @@ -21,6 +20,7 @@ generated_code.append("") generated_code.append("# This file was generated by libcst.codegen.gen_matcher_classes") generated_code.append("from typing import Optional, Union, TYPE_CHECKING") generated_code.append("") +generated_code.append("from libcst._flatten_sentinel import FlattenSentinel") generated_code.append("from libcst._maybe_sentinel import MaybeSentinel") generated_code.append("from libcst._removal_sentinel import RemovalSentinel") generated_code.append("from libcst._typed_visitor_base import mark_no_op") @@ -32,7 +32,7 @@ generated_code.append("") generated_code.append("if TYPE_CHECKING:") for module, objects in imports.items(): generated_code.append(f" from {module} import ( # noqa: F401") - generated_code.append(f" {', '.join(sorted(list(objects)))}") + generated_code.append(f" {', '.join(sorted(objects))}") generated_code.append(" )") @@ -87,7 +87,6 @@ for node in sorted(nodebases.keys(), key=lambda node: node.__name__): generated_code.append("") generated_code.append("") generated_code.append("class CSTTypedTransformerFunctions(CSTTypedBaseFunctions):") -generated_code.append(" pass") for node in sorted(nodebases.keys(), key=lambda node: node.__name__): name = node.__name__ if name.startswith("Base"): @@ -99,12 +98,11 @@ for node in sorted(nodebases.keys(), key=lambda node: node.__name__): base_uses = nodeuses[nodebases[node]] if node_uses.maybe or base_uses.maybe: valid_return_types.append("MaybeSentinel") - if ( - node_uses.optional - or node_uses.sequence - or base_uses.optional - or base_uses.sequence - ): + + if node_uses.sequence or base_uses.sequence: + valid_return_types.append(f'FlattenSentinel["{nodebases[node].__name__}"]') + valid_return_types.append("RemovalSentinel") + elif node_uses.optional or base_uses.optional: valid_return_types.append("RemovalSentinel") generated_code.append( @@ 
-112,6 +110,7 @@ for node in sorted(nodebases.keys(), key=lambda node: node.__name__): ) generated_code.append(" return updated_node") + if __name__ == "__main__": # Output the code print("\n".join(generated_code)) diff --git a/libcst/codegen/generate.py b/libcst/codegen/generate.py index 60a952f2..92f13176 100644 --- a/libcst/codegen/generate.py +++ b/libcst/codegen/generate.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -25,9 +25,11 @@ from libcst.codegen.transforms import ( def format_file(fname: str) -> None: - with open(os.devnull, "w") as devnull: - subprocess.check_call(["isort", "-q", fname], stdout=devnull, stderr=devnull) - subprocess.check_call(["black", fname], stdout=devnull, stderr=devnull) + subprocess.check_call( + ["ufmt", "format", fname], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) def clean_generated_code(code: str) -> str: @@ -66,12 +68,11 @@ def codegen_visitors() -> None: # Now, see if the file we generated causes any import errors # by attempting to run codegen again in a new process. - with open(os.devnull, "w") as devnull: - subprocess.check_call( - ["python3", "-m", "libcst.codegen.gen_visitor_functions"], - cwd=base, - stdout=devnull, - ) + subprocess.check_call( + [sys.executable, "-m", "libcst.codegen.gen_visitor_functions"], + cwd=base, + stdout=subprocess.DEVNULL, + ) # If it worked, lets format the file format_file(visitors_file) @@ -145,11 +146,16 @@ def main(cli_args: List[str]) -> int: parser = argparse.ArgumentParser(description="Generate code for libcst.") parser.add_argument( "system", - metavar="SYSTEM", - help='System to generate code for. 
Valid values include: "visitors", "matchers"', + choices=["all", "visitors", "matchers", "return_types"], + help="System to generate code for.", type=str, ) args = parser.parse_args(cli_args) + if args.system == "all": + codegen_visitors() + codegen_matchers() + codegen_return_types() + return 0 if args.system == "visitors": codegen_visitors() return 0 diff --git a/libcst/codegen/tests/__init__.py b/libcst/codegen/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/codegen/tests/__init__.py +++ b/libcst/codegen/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codegen/tests/test_codegen_clean.py b/libcst/codegen/tests/test_codegen_clean.py index dad5166e..5ee5903d 100644 --- a/libcst/codegen/tests/test_codegen_clean.py +++ b/libcst/codegen/tests/test_codegen_clean.py @@ -1,8 +1,9 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. +import difflib import os import os.path @@ -14,15 +15,36 @@ from libcst.testing.utils import UnitTest class TestCodegenClean(UnitTest): + def assert_code_matches( + self, + old_code: str, + new_code: str, + module_name: str, + ) -> None: + if old_code != new_code: + diff = difflib.unified_diff( + old_code.splitlines(keepends=True), + new_code.splitlines(keepends=True), + fromfile="old_code", + tofile="new_code", + ) + diff_str = "".join(diff) + self.fail( + f"{module_name} needs new codegen, see " + + "`python -m libcst.codegen.generate --help` " + + "for instructions, or run `python -m libcst.codegen.generate all`. 
" + + f"Diff:\n{diff_str}" + ) + def test_codegen_clean_visitor_functions(self) -> None: """ Verifies that codegen of visitor functions would not result in a - changed file. If this test fails, please run 'tox -e codegen' to - generate new files. + changed file. If this test fails, please run 'python -m libcst.codegen.generate all' + to generate new files. """ new_code = clean_generated_code("\n".join(visitor_codegen.generated_code)) new_file = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "visitor_codegen.py.deleteme" + os.path.dirname(os.path.abspath(__file__)), "visitor_codegen.deleteme.py" ) with open(new_file, "w") as fp: fp.write(new_code) @@ -44,19 +66,17 @@ class TestCodegenClean(UnitTest): old_code = fp.read() # Now that we've done simple codegen, verify that it matches. - self.assertTrue( - old_code == new_code, "libcst._typed_visitor needs new codegen!" - ) + self.assert_code_matches(old_code, new_code, "libcst._typed_visitor") def test_codegen_clean_matcher_classes(self) -> None: """ Verifies that codegen of matcher classes would not result in a - changed file. If this test fails, please run 'tox -e codegen' to - generate new files. + changed file. If this test fails, please run 'python -m libcst.codegen.generate all' + to generate new files. """ new_code = clean_generated_code("\n".join(matcher_codegen.generated_code)) new_file = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "matcher_codegen.py.deleteme" + os.path.dirname(os.path.abspath(__file__)), "matcher_codegen.deleteme.py" ) with open(new_file, "w") as fp: fp.write(new_code) @@ -78,19 +98,17 @@ class TestCodegenClean(UnitTest): old_code = fp.read() # Now that we've done simple codegen, verify that it matches. - self.assertTrue( - old_code == new_code, "libcst.matchers.__init__ needs new codegen!" 
- ) + self.assert_code_matches(old_code, new_code, "libcst.matchers.__init__") def test_codegen_clean_return_types(self) -> None: """ Verifies that codegen of return types would not result in a - changed file. If this test fails, please run 'tox -e codegen' to - generate new files. + changed file. If this test fails, please run 'python -m libcst.codegen.generate all' + to generate new files. """ new_code = clean_generated_code("\n".join(type_codegen.generated_code)) new_file = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "type_codegen.py.deleteme" + os.path.dirname(os.path.abspath(__file__)), "type_codegen.deleteme.py" ) with open(new_file, "w") as fp: fp.write(new_code) @@ -113,6 +131,51 @@ class TestCodegenClean(UnitTest): old_code = fp.read() # Now that we've done simple codegen, verify that it matches. - self.assertTrue( - old_code == new_code, "libcst.matchers._return_types needs new codegen!" + self.assert_code_matches(old_code, new_code, "libcst.matchers._return_types") + + def test_normalize_unions(self) -> None: + """ + Verifies that NormalizeUnions correctly converts binary operations with | + into Union types, with special handling for Optional cases. 
+ """ + import libcst as cst + from libcst.codegen.gen_matcher_classes import NormalizeUnions + + def assert_transforms_to(input_code: str, expected_code: str) -> None: + input_cst = cst.parse_expression(input_code) + expected_cst = cst.parse_expression(expected_code) + + result = input_cst.visit(NormalizeUnions()) + assert isinstance( + result, cst.BaseExpression + ), f"Expected BaseExpression, got {type(result)}" + + result_code = cst.Module(body=()).code_for_node(result) + expected_code_str = cst.Module(body=()).code_for_node(expected_cst) + + self.assertEqual( + result_code, + expected_code_str, + f"Expected {expected_code_str}, got {result_code}", + ) + + # Test regular union case + assert_transforms_to("foo | bar | baz", "typing.Union[foo, bar, baz]") + + # Test Optional case (None on right) + assert_transforms_to("foo | None", "typing.Optional[foo]") + + # Test Optional case (None on left) + assert_transforms_to("None | foo", "typing.Optional[foo]") + + # Test case with more than 2 operands including None (should remain Union) + assert_transforms_to("foo | bar | None", "typing.Union[foo, bar, None]") + + # Flatten existing Union types + assert_transforms_to( + "typing.Union[foo, typing.Union[bar, baz]]", "typing.Union[foo, bar, baz]" + ) + # Merge two kinds of union types + assert_transforms_to( + "foo | typing.Union[bar, baz]", "typing.Union[foo, bar, baz]" ) diff --git a/libcst/codegen/transforms.py b/libcst/codegen/transforms.py index 61bbddf5..22f9058e 100644 --- a/libcst/codegen/transforms.py +++ b/libcst/codegen/transforms.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/codemod/__init__.py b/libcst/codemod/__init__.py index b2b2feab..140b8ef8 100644 --- a/libcst/codemod/__init__.py +++ b/libcst/codemod/__init__.py @@ -1,14 +1,14 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from libcst.codemod._cli import ( - ParallelTransformResult, diff_code, exec_transform_with_prettyprint, gather_files, parallel_exec_transform_with_prettyprint, + ParallelTransformResult, ) from libcst.codemod._codemod import Codemod from libcst.codemod._command import ( @@ -20,17 +20,16 @@ from libcst.codemod._context import CodemodContext from libcst.codemod._runner import ( SkipFile, SkipReason, + transform_module, TransformExit, TransformFailure, TransformResult, TransformSkip, TransformSuccess, - transform_module, ) from libcst.codemod._testing import CodemodTest from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor - __all__ = [ "Codemod", "CodemodContext", diff --git a/libcst/codemod/_cli.py b/libcst/codemod/_cli.py index 985684c3..d9c70d05 100644 --- a/libcst/codemod/_cli.py +++ b/libcst/codemod/_cli.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,33 +8,38 @@ Provides helpers for CLI interaction. 
""" import difflib +import functools import os.path import re import subprocess import sys import time import traceback -from dataclasses import dataclass, replace -from multiprocessing import Pool, cpu_count +from concurrent.futures import as_completed, Executor +from copy import deepcopy +from dataclasses import dataclass +from multiprocessing import cpu_count from pathlib import Path -from typing import Any, AnyStr, Dict, List, Optional, Sequence, Union, cast +from typing import AnyStr, Callable, cast, Dict, List, Optional, Sequence, Type, Union +from warnings import warn -from libcst import PartialParserConfig, parse_module +from libcst import parse_module, PartialParserConfig from libcst.codemod._codemod import Codemod -from libcst.codemod._dummy_pool import DummyPool +from libcst.codemod._context import CodemodContext +from libcst.codemod._dummy_pool import DummyExecutor from libcst.codemod._runner import ( SkipFile, SkipReason, + transform_module, TransformExit, TransformFailure, TransformResult, TransformSkip, TransformSuccess, - transform_module, ) +from libcst.helpers import calculate_module_and_package from libcst.metadata import FullRepoManager - _DEFAULT_GENERATED_CODE_MARKER: str = f"@gen{''}erated" @@ -46,7 +51,7 @@ def invoke_formatter(formatter_args: Sequence[str], code: AnyStr) -> AnyStr: # Make sure there is something to run if len(formatter_args) == 0: - raise Exception("No formatter configured but code formatting requested.") + raise ValueError("No formatter configured but code formatting requested.") # Invoke the formatter, giving it the code as stdin and assuming the formatted # code comes from stdout. 
@@ -55,7 +60,6 @@ def invoke_formatter(formatter_args: Sequence[str], code: AnyStr) -> AnyStr: AnyStr, subprocess.check_output( formatter_args, - env={}, input=code, universal_newlines=not work_with_bytes, encoding=None if work_with_bytes else "utf-8", @@ -91,7 +95,10 @@ def gather_files( ret.extend( str(p) for p in Path(fd).rglob("*.py*") - if str(p).endswith("py") or (include_stubs and str(p).endswith("pyi")) + if Path.is_file(p) + and ( + str(p).endswith("py") or (include_stubs and str(p).endswith("pyi")) + ) ) return sorted(ret) @@ -162,19 +169,19 @@ def exec_transform_with_prettyprint( return code result = transform_module(transform, code, python_version=python_version) - code: Optional[str] = ( + maybe_code: Optional[str] = ( None if isinstance(result, (TransformFailure, TransformExit, TransformSkip)) else result.code ) - if code is not None and format_code: + if maybe_code is not None and format_code: try: - code = invoke_formatter(formatter_args, code) + maybe_code = invoke_formatter(formatter_args, maybe_code) except Exception as ex: # Failed to format code, treat as a failure and make sure that # we print the exception for debugging. - code = None + maybe_code = None result = TransformFailure( error=ex, traceback_str=traceback.format_exc(), @@ -183,36 +190,7 @@ def exec_transform_with_prettyprint( # Finally, print the output, regardless of what happened print_execution_result(result) - return code - - -def _calculate_module(repo_root: Optional[str], filename: str) -> Optional[str]: - # Given an absolute repo_root and an absolute filename, calculate the - # python module name for the file. - if repo_root is None: - # We don't have a repo root, so this is impossible to calculate. - return None - - # Make sure the absolute path for the root ends in a separator. - if repo_root[-1] != os.path.sep: - repo_root = repo_root + os.path.sep - - if not filename.startswith(repo_root): - # This file seems to be out of the repo root. 
- return None - - # Get the relative path, get rid of any special cases and extensions. - relative_filename = filename[len(repo_root) :] - for ending in [ - f"{os.path.sep}__init__.py", - f"{os.path.sep}__main__.py", - ".py", - ]: - if relative_filename.endswith(ending): - relative_filename = relative_filename[: -len(ending)] - - # Now, convert all line separators to dots to represent the python module. - return relative_filename.replace(os.path.sep, ".") + return maybe_code @@ -237,11 +215,52 @@ class ExecutionConfig: unified_diff: Optional[int] = None -def _execute_transform( # noqa: C901 - transformer: Codemod, +def _prepare_context( + repo_root: str, filename: str, - config: ExecutionConfig, -) -> ExecutionResult: + scratch: Dict[str, object], + repo_manager: Optional[FullRepoManager], +) -> CodemodContext: + # determine the module and package name for this file + try: + module_name_and_package = calculate_module_and_package(repo_root, filename) + mod_name = module_name_and_package.name + pkg_name = module_name_and_package.package + except ValueError as ex: + print(f"Failed to determine module name for {filename}: {ex}", file=sys.stderr) + mod_name = None + pkg_name = None + return CodemodContext( + scratch=scratch, + filename=filename, + full_module_name=mod_name, + full_package_name=pkg_name, + metadata_manager=repo_manager, + ) + + +def _instantiate_transformer( + transformer: Union[Codemod, Type[Codemod]], + repo_root: str, + filename: str, + original_scratch: Dict[str, object], + codemod_kwargs: Dict[str, object], + repo_manager: Optional[FullRepoManager], +) -> Codemod: + if isinstance(transformer, type): + return transformer( # type: ignore + context=_prepare_context(repo_root, filename, {}, repo_manager), + **codemod_kwargs, + ) + transformer.context = _prepare_context( + repo_root, filename, deepcopy(original_scratch), repo_manager + ) + return transformer + + +def _check_for_skip( + filename: str, config: ExecutionConfig +) -> 
Union[ExecutionResult, bytes]: for pattern in config.blacklist_patterns: if re.fullmatch(pattern, filename): return ExecutionResult( @@ -253,32 +272,46 @@ def _execute_transform( # noqa: C901 ), ) - try: - with open(filename, "rb") as fp: - oldcode = fp.read() + with open(filename, "rb") as fp: + oldcode = fp.read() - # Skip generated files - if ( - not config.include_generated - and config.generated_code_marker.encode("utf-8") in oldcode - ): - return ExecutionResult( - filename=filename, - changed=False, - transform_result=TransformSkip( - skip_reason=SkipReason.GENERATED, - skip_description="Generated file.", - ), - ) - - # Somewhat gross hack to provide the filename in the transform's context. - # We do this after the fork so that a context that was initialized with - # some defaults before calling parallel_exec_transform_with_prettyprint - # will be updated per-file. - transformer.context = replace( - transformer.context, + # Skip generated files + if ( + not config.include_generated + and config.generated_code_marker.encode("utf-8") in oldcode + ): + return ExecutionResult( filename=filename, - full_module_name=_calculate_module(config.repo_root, filename), + changed=False, + transform_result=TransformSkip( + skip_reason=SkipReason.GENERATED, + skip_description="Generated file.", + ), + ) + return oldcode + + +def _execute_transform( + transformer: Union[Codemod, Type[Codemod]], + filename: str, + config: ExecutionConfig, + original_scratch: Dict[str, object], + codemod_args: Optional[Dict[str, object]], + repo_manager: Optional[FullRepoManager], +) -> ExecutionResult: + warnings: list[str] = [] + try: + oldcode = _check_for_skip(filename, config) + if isinstance(oldcode, ExecutionResult): + return oldcode + + transformer_instance = _instantiate_transformer( + transformer, + config.repo_root or ".", + filename, + original_scratch, + codemod_args or {}, + repo_manager, ) # Run the transform, bail if we failed or if we aren't formatting code @@ -291,55 +324,26 
@@ def _execute_transform( # noqa: C901 else PartialParserConfig() ), ) - output_tree = transformer.transform_module(input_tree) + output_tree = transformer_instance.transform_module(input_tree) newcode = output_tree.bytes encoding = output_tree.encoding - except KeyboardInterrupt: - return ExecutionResult( - filename=filename, changed=False, transform_result=TransformExit() - ) + warnings.extend(transformer_instance.context.warnings) except SkipFile as ex: + warnings.extend(transformer_instance.context.warnings) return ExecutionResult( filename=filename, changed=False, transform_result=TransformSkip( skip_reason=SkipReason.OTHER, skip_description=str(ex), - warning_messages=transformer.context.warnings, - ), - ) - except Exception as ex: - return ExecutionResult( - filename=filename, - changed=False, - transform_result=TransformFailure( - error=ex, - traceback_str=traceback.format_exc(), - warning_messages=transformer.context.warnings, + warning_messages=warnings, ), ) # Call formatter if needed, but only if we actually changed something in this # file if config.format_code and newcode != oldcode: - try: - newcode = invoke_formatter(config.formatter_args, newcode) - except KeyboardInterrupt: - return ExecutionResult( - filename=filename, - changed=False, - transform_result=TransformExit(), - ) - except Exception as ex: - return ExecutionResult( - filename=filename, - changed=False, - transform_result=TransformFailure( - error=ex, - traceback_str=traceback.format_exc(), - warning_messages=transformer.context.warnings, - ), - ) + newcode = invoke_formatter(config.formatter_args, newcode) # Format as unified diff if needed, otherwise save it back changed = oldcode != newcode @@ -362,13 +366,14 @@ def _execute_transform( # noqa: C901 return ExecutionResult( filename=filename, changed=changed, - transform_result=TransformSuccess( - warning_messages=transformer.context.warnings, code=newcode - ), + transform_result=TransformSuccess(warning_messages=warnings, 
code=newcode), ) + except KeyboardInterrupt: return ExecutionResult( - filename=filename, changed=False, transform_result=TransformExit() + filename=filename, + changed=False, + transform_result=TransformExit(warning_messages=warnings), ) except Exception as ex: return ExecutionResult( @@ -377,7 +382,7 @@ def _execute_transform( # noqa: C901 transform_result=TransformFailure( error=ex, traceback_str=traceback.format_exc(), - warning_messages=transformer.context.warnings, + warning_messages=warnings, ), ) @@ -430,7 +435,7 @@ class Progress: operations still to do. """ - if files_finished <= 0: + if files_finished <= 0 or elapsed_seconds == 0: # Technically infinite but calculating sounds better. return "[calculating]" @@ -488,7 +493,7 @@ def _print_parallel_result( ) # In unified diff mode, the code is a diff we must print. - if unified_diff: + if unified_diff and result.code: print(result.code) @@ -514,15 +519,8 @@ class ParallelTransformResult: skips: int -# Unfortunate wrapper required since there is no `istarmap_unordered`... -def _execute_transform_wrap( - job: Dict[str, Any], -) -> ExecutionResult: - return _execute_transform(**job) - - def parallel_exec_transform_with_prettyprint( # noqa: C901 - transform: Codemod, + transform: Union[Codemod, Type[Codemod]], files: Sequence[str], *, jobs: Optional[int] = None, @@ -538,53 +536,69 @@ def parallel_exec_transform_with_prettyprint( # noqa: C901 blacklist_patterns: Sequence[str] = (), python_version: Optional[str] = None, repo_root: Optional[str] = None, + codemod_args: Optional[Dict[str, object]] = None, ) -> ParallelTransformResult: """ - Given a list of files and an instantiated codemod we should apply to them, - fork and apply the codemod in parallel to all of the files, including any - configured formatter. The ``jobs`` parameter controls the maximum number of - in-flight transforms, and needs to be at least 1. If not included, the number - of jobs will automatically be set to the number of CPU cores. 
If ``unified_diff`` - is set to a number, changes to files will be printed to stdout with - ``unified_diff`` lines of context. If it is set to ``None`` or left out, files - themselves will be updated with changes and formatting. If a - ``python_version`` is provided, then we will parse each source file using - this version. Otherwise, we will use the version of the currently executing python + Given a list of files and a codemod we should apply to them, fork and apply the + codemod in parallel to all of the files, including any configured formatter. The + ``jobs`` parameter controls the maximum number of in-flight transforms, and needs to + be at least 1. If not included, the number of jobs will automatically be set to the + number of CPU cores. If ``unified_diff`` is set to a number, changes to files will + be printed to stdout with ``unified_diff`` lines of context. If it is set to + ``None`` or left out, files themselves will be updated with changes and formatting. + If a ``python_version`` is provided, then we will parse each source file using this + version. Otherwise, we will use the version of the currently executing python binary. - A progress indicator as well as any generated warnings will be printed to stderr. - To supress the interactive progress indicator, set ``hide_progress`` to ``True``. - Files that include the generated code marker will be skipped unless the - ``include_generated`` parameter is set to ``True``. Similarly, files that match - a supplied blacklist of regex patterns will be skipped. Warnings for skipping - both blacklisted and generated files will be printed to stderr along with - warnings generated by the codemod unless ``hide_blacklisted`` and - ``hide_generated`` are set to ``True``. Files that were successfully codemodded - will not be printed to stderr unless ``show_successes`` is set to ``True``. + A progress indicator as well as any generated warnings will be printed to stderr. 
To + suppress the interactive progress indicator, set ``hide_progress`` to ``True``. Files + that include the generated code marker will be skipped unless the + ``include_generated`` parameter is set to ``True``. Similarly, files that match a + supplied blacklist of regex patterns will be skipped. Warnings for skipping both + blacklisted and generated files will be printed to stderr along with warnings + generated by the codemod unless ``hide_blacklisted`` and ``hide_generated`` are set + to ``True``. Files that were successfully codemodded will not be printed to stderr + unless ``show_successes`` is set to ``True``. - To make this API possible, we take an instantiated transform. This is due to - the fact that lambdas are not pickleable and pickling functions is undefined. - This means we're implicitly relying on fork behavior on UNIX-like systems, and - this function will not work on Windows systems. To create a command-line utility - that runs on Windows, please instead see - :func:`~libcst.codemod.exec_transform_with_prettyprint`. + We take a :class:`~libcst.codemod._codemod.Codemod` class, or an instantiated + :class:`~libcst.codemod._codemod.Codemod`. In the former case, the codemod will be + instantiated for each file, with ``codemod_args`` passed in to the constructor. + Passing an already instantiated :class:`~libcst.codemod._codemod.Codemod` is + deprecated, because it leads to sharing of the + :class:`~libcst.codemod._codemod.Codemod` instance across files, which is a common + source of hard-to-track-down bugs when the :class:`~libcst.codemod._codemod.Codemod` + tracks its state on the instance. """ + if isinstance(transform, Codemod): + warn( + "Passing transformer instances to `parallel_exec_transform_with_prettyprint` " + "is deprecated and will break in a future version. " + "Please pass the transformer class instead.", + DeprecationWarning, + stacklevel=2, + ) + # Ensure that we have no duplicates, otherwise we might get race conditions # on write. 
- files = sorted(list({os.path.abspath(f) for f in files})) + files = sorted({os.path.abspath(f) for f in files}) total = len(files) progress = Progress(enabled=not hide_progress, total=total) + chunksize = 4 # Grab number of cores if we need to - jobs: int = jobs if jobs is not None else cpu_count() + jobs = min( + jobs if jobs is not None else cpu_count(), + (len(files) + chunksize - 1) // chunksize, + ) if jobs < 1: - raise Exception("Must have at least one job to process!") + raise ValueError("Must have at least one job to process!") if total == 0: return ParallelTransformResult(successes=0, failures=0, skips=0, warnings=0) + metadata_manager: Optional[FullRepoManager] = None if repo_root is not None: # Make sure if there is a root that we have the absolute path to it. repo_root = os.path.abspath(repo_root) @@ -597,10 +611,7 @@ def parallel_exec_transform_with_prettyprint( # noqa: C901 transform.get_inherited_dependencies(), ) metadata_manager.resolve_cache() - transform.context = replace( - transform.context, - metadata_manager=metadata_manager, - ) + print("Executing codemod...", file=sys.stderr) config = ExecutionConfig( @@ -614,13 +625,16 @@ def parallel_exec_transform_with_prettyprint( # noqa: C901 python_version=python_version, ) - if total == 1: + pool_impl: Callable[[], Executor] + if total == 1 or jobs == 1: # Simple case, we should not pay for process overhead. - # Let's just use a dummy synchronous pool. + # Let's just use a dummy synchronous executor. jobs = 1 - pool_impl = DummyPool - else: - pool_impl = Pool + pool_impl = DummyExecutor + elif getattr(sys, "_is_gil_enabled", lambda: True)(): # pyre-ignore[16] + from concurrent.futures import ProcessPoolExecutor + + pool_impl = functools.partial(ProcessPoolExecutor, max_workers=jobs) # Warm the parser, pre-fork. 
parse_module( "", @@ -630,23 +644,35 @@ def parallel_exec_transform_with_prettyprint( # noqa: C901 else PartialParserConfig() ), ) + else: + from concurrent.futures import ThreadPoolExecutor + + pool_impl = functools.partial(ThreadPoolExecutor, max_workers=jobs) successes: int = 0 failures: int = 0 warnings: int = 0 skips: int = 0 + original_scratch = ( + deepcopy(transform.context.scratch) if isinstance(transform, Codemod) else {} + ) - with pool_impl(processes=jobs) as p: # type: ignore - args = [ - { - "transformer": transform, - "filename": filename, - "config": config, - } - for filename in files - ] + with pool_impl() as executor: # type: ignore try: - for result in p.imap_unordered(_execute_transform_wrap, args, chunksize=4): + futures = [ + executor.submit( + _execute_transform, + transformer=transform, + filename=filename, + config=config, + original_scratch=original_scratch, + codemod_args=codemod_args, + repo_manager=metadata_manager, + ) + for filename in files + ] + for future in as_completed(futures): + result = future.result() # Print an execution result, keep track of failures _print_parallel_result( result, diff --git a/libcst/codemod/_codemod.py b/libcst/codemod/_codemod.py index bae27674..e267f154 100644 --- a/libcst/codemod/_codemod.py +++ b/libcst/codemod/_codemod.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -56,9 +56,9 @@ class Codemod(MetadataDependent, ABC): """ module = self.context.module if module is None: - raise Exception( + raise ValueError( f"Attempted access of {self.__class__.__name__}.module outside of " - + "transform_module()." + "transform_module()." 
) return module @@ -78,7 +78,7 @@ class Codemod(MetadataDependent, ABC): oldwrapper = self.context.wrapper metadata_manager = self.context.metadata_manager filename = self.context.filename - if metadata_manager and filename: + if metadata_manager is not None and filename: # We can look up full-repo metadata for this codemod! cache = metadata_manager.get_cache_for_path(filename) wrapper = MetadataWrapper(module, cache=cache) diff --git a/libcst/codemod/_command.py b/libcst/codemod/_command.py index 1a11e91e..b7784d30 100644 --- a/libcst/codemod/_command.py +++ b/libcst/codemod/_command.py @@ -1,21 +1,22 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # +from __future__ import annotations + import argparse import inspect from abc import ABC, abstractmethod -from typing import Dict, Generator, List, Type, TypeVar +from typing import Dict, Generator, List, Tuple, Type, TypeVar -from libcst import Module +from libcst import CSTNode, Module from libcst.codemod._codemod import Codemod from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer from libcst.codemod.visitors._add_imports import AddImportsVisitor from libcst.codemod.visitors._remove_imports import RemoveImportsVisitor - _Codemod = TypeVar("_Codemod", bound=Codemod) @@ -66,6 +67,28 @@ class CodemodCommand(Codemod, ABC): """ ... 
+ # Lightweight wrappers for RemoveImportsVisitor static functions + def remove_unused_import( + self, + module: str, + obj: str | None = None, + asname: str | None = None, + ) -> None: + RemoveImportsVisitor.remove_unused_import(self.context, module, obj, asname) + + def remove_unused_import_by_node(self, node: CSTNode) -> None: + RemoveImportsVisitor.remove_unused_import_by_node(self.context, node) + + # Lightweight wrappers for AddImportsVisitor static functions + def add_needed_import( + self, + module: str, + obj: str | None = None, + asname: str | None = None, + relative: int = 0, + ) -> None: + AddImportsVisitor.add_needed_import(self.context, module, obj, asname, relative) + def transform_module(self, tree: Module) -> Module: # Overrides (but then calls) Codemod's transform_module to provide # a spot where additional supported transforms can be attached and run. @@ -76,13 +99,13 @@ class CodemodCommand(Codemod, ABC): # have a static method that other transforms can use which takes # a context and other optional args and modifies its own context key # accordingly. We import them here so that we don't have circular imports. - supported_transforms: Dict[str, Type[Codemod]] = { - AddImportsVisitor.CONTEXT_KEY: AddImportsVisitor, - RemoveImportsVisitor.CONTEXT_KEY: RemoveImportsVisitor, - } + supported_transforms: List[Tuple[str, Type[Codemod]]] = [ + (AddImportsVisitor.CONTEXT_KEY, AddImportsVisitor), + (RemoveImportsVisitor.CONTEXT_KEY, RemoveImportsVisitor), + ] # For any visitors that we support auto-running, run them here if needed. - for key, transform in supported_transforms.items(): + for key, transform in supported_transforms: if key in self.context.scratch: # We have work to do, so lets run this. tree = self._instantiate_and_run(transform, tree) @@ -138,13 +161,11 @@ class MagicArgsCodemodCommand(CodemodCommand, ABC): """ ... 
- def _instantiate(self, transform: Type[Codemod]) -> Codemod: + def _instantiate(self, transform: Type[_Codemod]) -> _Codemod: # Grab the expected arguments argspec = inspect.getfullargspec(transform.__init__) args: List[object] = [] kwargs: Dict[str, object] = {} - # pyre-fixme[6]: Expected `Sized` for 1st param but got `Union[Tuple[], - # Tuple[Any, ...]]`. last_default_arg = len(argspec.args) - len(argspec.defaults or ()) for i, arg in enumerate(argspec.args): if arg in ["self", "context"]: diff --git a/libcst/codemod/_context.py b/libcst/codemod/_context.py index 98e57adf..47373df4 100644 --- a/libcst/codemod/_context.py +++ b/libcst/codemod/_context.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -44,6 +44,12 @@ class CodemodContext: #: in the repo named ``foo/bar/baz.py``. full_module_name: Optional[str] = None + #: The current package if a codemod is being executed against a file that + #: lives on disk, and the repository root is correctly configured. This + #: Will take the form of a dotted name such as ``foo.bar`` for a file + #: in the repo named ``foo/bar/baz.py`` + full_package_name: Optional[str] = None + #: The current top level metadata wrapper for the module being modified. #: To access computed metadata when inside an actively running codemod, use #: the :meth:`~libcst.MetadataDependent.get_metadata` method on diff --git a/libcst/codemod/_dummy_pool.py b/libcst/codemod/_dummy_pool.py index d92307ce..aa23a7d4 100644 --- a/libcst/codemod/_dummy_pool.py +++ b/libcst/codemod/_dummy_pool.py @@ -1,35 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import sys +from concurrent.futures import Executor, Future from types import TracebackType -from typing import Callable, Generator, Iterable, Optional, Type, TypeVar +from typing import Callable, Optional, Type, TypeVar + +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +Return = TypeVar("Return") +Params = ParamSpec("Params") -RetT = TypeVar("RetT") -ArgT = TypeVar("ArgT") - - -class DummyPool: +class DummyExecutor(Executor): """ - Synchronous dummy `multiprocessing.Pool` analogue. + Synchronous dummy `concurrent.futures.Executor` analogue. """ - def __init__(self, processes: Optional[int] = None) -> None: - pass - - def imap_unordered( + def submit( self, - func: Callable[[ArgT], RetT], - iterable: Iterable[ArgT], - chunksize: Optional[int] = None, - ) -> Generator[RetT, None, None]: - for args in iterable: - yield func(args) + fn: Callable[Params, Return], + /, + *args: Params.args, + **kwargs: Params.kwargs, + ) -> Future[Return]: + future: Future[Return] = Future() + try: + result = fn(*args, **kwargs) + future.set_result(result) + except Exception as exc: + future.set_exception(exc) + return future - def __enter__(self) -> "DummyPool": + def __enter__(self) -> "DummyExecutor": return self def __exit__( self, - exc_type: Optional[Type[Exception]], - exc: Optional[Exception], - tb: Optional[TracebackType], + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], ) -> None: pass diff --git a/libcst/codemod/_runner.py b/libcst/codemod/_runner.py index a4b68dd4..4e76a935 100644 --- a/libcst/codemod/_runner.py +++ b/libcst/codemod/_runner.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -13,10 +13,9 @@ from dataclasses import dataclass from enum import Enum from typing import Optional, Sequence, Union -from libcst import PartialParserConfig, parse_module +from libcst import parse_module, PartialParserConfig from libcst.codemod._codemod import Codemod - # All datastructures defined in this class are pickleable so that they can be used # as a return value with the multiprocessing module. diff --git a/libcst/codemod/_testing.py b/libcst/codemod/_testing.py index 75895b10..fb4d85c2 100644 --- a/libcst/codemod/_testing.py +++ b/libcst/codemod/_testing.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,7 @@ from textwrap import dedent from typing import Optional, Sequence, Type -from libcst import PartialParserConfig, parse_module +from libcst import parse_module, PartialParserConfig from libcst.codemod._codemod import Codemod from libcst.codemod._context import CodemodContext from libcst.codemod._runner import SkipFile @@ -98,6 +98,7 @@ class _CodemodTest: """ context = context_override if context_override is not None else CodemodContext() + # pyre-fixme[45]: Cannot instantiate abstract class `Codemod`. transform_instance = self.TRANSFORM(context, *args, **kwargs) input_tree = parse_module( CodemodTest.make_fixture_data(before), diff --git a/libcst/codemod/_visitor.py b/libcst/codemod/_visitor.py index d368b854..89248838 100644 --- a/libcst/codemod/_visitor.py +++ b/libcst/codemod/_visitor.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,7 +6,7 @@ from typing import Mapping import libcst as cst -from libcst import MetadataDependent +from libcst import MetadataDependent, MetadataException from libcst.codemod._codemod import Codemod from libcst.codemod._context import CodemodContext from libcst.matchers import MatcherDecoratableTransformer, MatcherDecoratableVisitor @@ -69,14 +69,14 @@ class ContextAwareVisitor(MatcherDecoratableVisitor, MetadataDependent): if dependencies: wrapper = self.context.wrapper if wrapper is None: - raise Exception( + raise MetadataException( f"Attempting to instantiate {self.__class__.__name__} outside of " + "an active transform. This means that metadata hasn't been " + "calculated and we cannot successfully create this visitor." ) for dep in dependencies: if dep not in wrapper._metadata: - raise Exception( + raise MetadataException( f"Attempting to access metadata {dep.__name__} that was not a " + "declared dependency of parent transform! This means it is " + "not possible to compute this value. Please ensure that all " @@ -101,7 +101,7 @@ class ContextAwareVisitor(MatcherDecoratableVisitor, MetadataDependent): """ module = self.context.module if module is None: - raise Exception( + raise ValueError( f"Attempted access of {self.__class__.__name__}.module outside of " + "transform_module()." ) diff --git a/libcst/codemod/commands/__init__.py b/libcst/codemod/commands/__init__.py index 602d2685..aac70d45 100644 --- a/libcst/codemod/commands/__init__.py +++ b/libcst/codemod/commands/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/codemod/commands/add_pyre_directive.py b/libcst/codemod/commands/add_pyre_directive.py index 165ebb0b..bee95c61 100644 --- a/libcst/codemod/commands/add_pyre_directive.py +++ b/libcst/codemod/commands/add_pyre_directive.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/add_trailing_commas.py b/libcst/codemod/commands/add_trailing_commas.py new file mode 100644 index 00000000..2f33a4bd --- /dev/null +++ b/libcst/codemod/commands/add_trailing_commas.py @@ -0,0 +1,127 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import textwrap +from typing import Dict, Optional + +import libcst as cst +from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand + + +presets_per_formatter: Dict[str, Dict[str, int]] = { + "black": { + "parameter_count": 1, + "argument_count": 2, + }, + "yapf": { + "parameter_count": 2, + "argument_count": 2, + }, +} + + +class AddTrailingCommas(VisitorBasedCodemodCommand): + DESCRIPTION: str = textwrap.dedent( + """ + Codemod that adds trailing commas to arguments in function + headers and function calls. 
+ + The idea is that both the black and yapf autoformatters will + tend to split headers and function calls so that there + is one parameter / argument per line if there is a trailing + comma: + - Black will always separate them by line + - Yapf appears to do so whenever there are at least two arguments + + Applying this codemod (and then an autoformatter) may make + it easier to read function definitions and calls + """ + ) + + def __init__( + self, + context: CodemodContext, + formatter: str = "black", + parameter_count: Optional[int] = None, + argument_count: Optional[int] = None, + ) -> None: + super().__init__(context) + presets = presets_per_formatter.get(formatter) + if presets is None: + raise ValueError( + f"Unknown formatter {formatter!r}. Presets exist for " + + ", ".join(presets_per_formatter.keys()) + ) + self.parameter_count: int = parameter_count or presets["parameter_count"] + self.argument_count: int = argument_count or presets["argument_count"] + + @staticmethod + def add_args(arg_parser: argparse.ArgumentParser) -> None: + arg_parser.add_argument( + "--formatter", + dest="formatter", + metavar="FORMATTER", + help="Formatter to target (e.g. 
yapf or black)", + type=str, + default="black", + ) + arg_parser.add_argument( + "--parameter-count", + dest="parameter_count", + metavar="PARAMETER_COUNT", + help="Minimal number of parameters for us to add trailing comma", + type=int, + default=None, + ) + arg_parser.add_argument( + "--argument-count", + dest="argument_count", + metavar="ARGUMENT_COUNT", + help="Minimal number of arguments for us to add trailing comma", + type=int, + default=None, + ) + + def leave_Parameters( + self, + original_node: cst.Parameters, + updated_node: cst.Parameters, + ) -> cst.Parameters: + skip = ( + # + self.parameter_count is None + or len(updated_node.params) < self.parameter_count + or ( + len(updated_node.params) == 1 + and updated_node.params[0].name.value in {"self", "cls"} + ) + ) + if skip: + return updated_node + else: + last_param = updated_node.params[-1] + return updated_node.with_changes( + params=( + *updated_node.params[:-1], + last_param.with_changes(comma=cst.Comma()), + ), + ) + + def leave_Call( + self, + original_node: cst.Call, + updated_node: cst.Call, + ) -> cst.Call: + if len(updated_node.args) < self.argument_count: + return updated_node + else: + last_arg = updated_node.args[-1] + return updated_node.with_changes( + args=( + *updated_node.args[:-1], + last_arg.with_changes(comma=cst.Comma()), + ), + ) diff --git a/libcst/codemod/commands/convert_format_to_fstring.py b/libcst/codemod/commands/convert_format_to_fstring.py index 29e81246..43d19bce 100644 --- a/libcst/codemod/commands/convert_format_to_fstring.py +++ b/libcst/codemod/commands/convert_format_to_fstring.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -9,6 +9,8 @@ from typing import Generator, List, Optional, Sequence, Set, Tuple import libcst as cst import libcst.matchers as m +from libcst import CSTLogicError +from libcst._exceptions import ParserSyntaxError from libcst.codemod import ( CodemodContext, ContextAwareTransformer, @@ -23,7 +25,7 @@ def _get_lhs(field: cst.BaseExpression) -> cst.BaseExpression: elif isinstance(field, (cst.Attribute, cst.Subscript)): return _get_lhs(field.value) else: - raise Exception("Unsupported node type!") + raise TypeError("Unsupported node type!") def _find_expr_from_field_name( @@ -48,7 +50,7 @@ def _find_expr_from_field_name( if isinstance(lhs, cst.Integer): index = int(lhs.value) if index < 0 or index >= len(args): - raise Exception(f"Logic error, arg sequence {index} out of bounds!") + raise CSTLogicError(f"Logic error, arg sequence {index} out of bounds!") elif isinstance(lhs, cst.Name): for i, arg in enumerate(args): kw = arg.keyword @@ -58,10 +60,12 @@ def _find_expr_from_field_name( index = i break if index is None: - raise Exception(f"Logic error, arg name {lhs.value} out of bounds!") + raise CSTLogicError(f"Logic error, arg name {lhs.value} out of bounds!") if index is None: - raise Exception(f"Logic error, unsupported fieldname expression {fieldname}!") + raise CSTLogicError( + f"Logic error, unsupported fieldname expression {fieldname}!" + ) # Format it! 
return field_expr.deep_replace(lhs, args[index].value) @@ -141,7 +145,7 @@ def _get_tokens( # noqa: C901 in_brackets -= 1 if in_brackets < 0: - raise Exception("Stray } in format string!") + raise ValueError("Stray } in format string!") if in_brackets == 0: field_name, format_spec, conversion = _get_field(format_accum) @@ -158,9 +162,11 @@ def _get_tokens( # noqa: C901 format_accum += char if in_brackets > 0: - raise Exception("Stray { in format string!") + raise ParserSyntaxError( + "Stray { in format string!", lines=[string], raw_line=0, raw_column=0 + ) if format_accum: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") # Yield the last bit of information yield (prefix, None, None, None) @@ -188,7 +194,7 @@ class SwitchStringQuotesTransformer(ContextAwareTransformer): def __init__(self, context: CodemodContext, avoid_quote: str) -> None: super().__init__(context) if avoid_quote not in {'"', "'"}: - raise Exception("Must specify either ' or \" single quote to avoid.") + raise ValueError("Must specify either ' or \" single quote to avoid.") self.avoid_quote: str = avoid_quote self.replace_quote: str = '"' if avoid_quote == "'" else "'" @@ -219,12 +225,11 @@ class SwitchStringQuotesTransformer(ContextAwareTransformer): class ConvertFormatStringCommand(VisitorBasedCodemodCommand): - DESCRIPTION: str = "Converts instances of str.format() to f-string." 
@staticmethod - def add_args(parser: argparse.ArgumentParser) -> None: - parser.add_argument( + def add_args(arg_parser: argparse.ArgumentParser) -> None: + arg_parser.add_argument( "--allow-strip-comments", dest="allow_strip_comments", help=( @@ -233,7 +238,7 @@ class ConvertFormatStringCommand(VisitorBasedCodemodCommand): ), action="store_true", ) - parser.add_argument( + arg_parser.add_argument( "--allow-await", dest="allow_await", help=( @@ -271,7 +276,7 @@ class ConvertFormatStringCommand(VisitorBasedCodemodCommand): inserted_sequence: int = 0 stringnode = cst.ensure_type(extraction["string"], cst.SimpleString) tokens = _get_tokens(stringnode.raw_value) - for (literal_text, field_name, format_spec, conversion) in tokens: + for literal_text, field_name, format_spec, conversion in tokens: if literal_text: fstring.append(cst.FormattedStringText(literal_text)) if field_name is None: @@ -297,7 +302,7 @@ class ConvertFormatStringCommand(VisitorBasedCodemodCommand): ) in format_spec_tokens: if spec_format_spec is not None: # This shouldn't be possible, we don't allow it in the spec! - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") if spec_literal_text: format_spec_parts.append( cst.FormattedStringText(spec_literal_text) diff --git a/libcst/codemod/commands/convert_namedtuple_to_dataclass.py b/libcst/codemod/commands/convert_namedtuple_to_dataclass.py index 09935b70..f1de5b0c 100644 --- a/libcst/codemod/commands/convert_namedtuple_to_dataclass.py +++ b/libcst/codemod/commands/convert_namedtuple_to_dataclass.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -25,7 +25,9 @@ class ConvertNamedTupleToDataclassCommand(VisitorBasedCodemodCommand): NamedTuple-specific attributes and methods. 
""" - DESCRIPTION: str = "Convert NamedTuple class declarations to Python 3.7 dataclasses using the @dataclass decorator." + DESCRIPTION: str = ( + "Convert NamedTuple class declarations to Python 3.7 dataclasses using the @dataclass decorator." + ) METADATA_DEPENDENCIES: Sequence[ProviderT] = (QualifiedNameProvider,) # The 'NamedTuple' we are interested in diff --git a/libcst/codemod/commands/convert_percent_format_to_fstring.py b/libcst/codemod/commands/convert_percent_format_to_fstring.py index 35032719..d74624e4 100644 --- a/libcst/codemod/commands/convert_percent_format_to_fstring.py +++ b/libcst/codemod/commands/convert_percent_format_to_fstring.py @@ -1,16 +1,16 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # +import itertools import re -from typing import Callable, cast +from typing import Callable, cast, List, Sequence import libcst as cst import libcst.matchers as m from libcst.codemod import VisitorBasedCodemodCommand - USE_FSTRING_SIMPLE_EXPRESSION_MAX_LENGTH = 30 @@ -53,12 +53,12 @@ class EscapeStringQuote(cst.CSTTransformer): original_node.prefix + quo + original_node.raw_value + quo ) if escaped_string.evaluated_value != original_node.evaluated_value: - raise Exception( + raise ValueError( f"Failed to escape string:\n original:{original_node.value}\n escaped:{escaped_string.value}" ) else: return escaped_string - raise Exception( + raise ValueError( f"Cannot find a good quote for escaping the SimpleString: {original_node.value}" ) return original_node @@ -74,8 +74,10 @@ class ConvertPercentFormatStringCommand(VisitorBasedCodemodCommand): extracts = m.extract( original_node, m.BinaryOperation( + # pyre-fixme[6]: Expected `Union[m._matcher_base.AllOf[typing.Union[m... 
left=m.MatchIfTrue(_match_simple_string), operator=m.Modulo(), + # pyre-fixme[6]: Expected `Union[m._matcher_base.AllOf[typing.Union[m... right=m.SaveMatchedNode( m.MatchIfTrue(_gen_match_simple_expression(self.module)), expr_key, @@ -84,7 +86,8 @@ class ConvertPercentFormatStringCommand(VisitorBasedCodemodCommand): ) if extracts: - expr = extracts[expr_key] + exprs = extracts[expr_key] + exprs = (exprs,) if not isinstance(exprs, Sequence) else exprs parts = [] simple_string = cst.ensure_type(original_node.left, cst.SimpleString) innards = simple_string.raw_value.replace("{", "{{").replace("}", "}}") @@ -92,10 +95,15 @@ class ConvertPercentFormatStringCommand(VisitorBasedCodemodCommand): token = tokens[0] if len(token) > 0: parts.append(cst.FormattedStringText(value=token)) - expressions = ( - [elm.value for elm in expr.elements] - if isinstance(expr, cst.Tuple) - else [expr] + expressions: List[cst.CSTNode] = list( + *itertools.chain( + ( + [elm.value for elm in expr.elements] + if isinstance(expr, cst.Tuple) + else [expr] + ) + for expr in exprs + ) ) escape_transformer = EscapeStringQuote(simple_string.quote) i = 1 diff --git a/libcst/codemod/commands/convert_type_comments.py b/libcst/codemod/commands/convert_type_comments.py new file mode 100644 index 00000000..5863d94b --- /dev/null +++ b/libcst/codemod/commands/convert_type_comments.py @@ -0,0 +1,875 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import argparse +import ast +import builtins +import dataclasses +import functools +import sys +from typing import cast, Dict, List, Optional, Sequence, Set, Tuple, Union + +import libcst as cst +import libcst.matchers as m +from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand + + +@functools.lru_cache() +def _empty_module() -> cst.Module: + return cst.parse_module("") + + +def _code_for_node(node: cst.CSTNode) -> str: + return _empty_module().code_for_node(node) + + +def _ast_for_statement(node: cst.CSTNode) -> ast.stmt: + """ + Get the type-comment-enriched python AST for a node. + + If there are illegal type comments, this can return a SyntaxError. + In that case, return the same node with no type comments (which will + cause this codemod to ignore it). + """ + code = _code_for_node(node) + try: + return ast.parse(code, type_comments=True).body[-1] + except SyntaxError: + return ast.parse(code, type_comments=False).body[-1] + + +def _parse_type_comment( + type_comment: Optional[str], +) -> Optional[ast.expr]: + """ + Attempt to parse a type comment. If it is None or if it fails to parse, + return None. 
+ """ + if type_comment is None: + return None + try: + return ast.parse(type_comment, "", "eval").body + except SyntaxError: + return None + + +def _annotation_for_statement( + node: cst.CSTNode, +) -> Optional[ast.expr]: + return _parse_type_comment(_ast_for_statement(node).type_comment) + + +def _parse_func_type_comment( + func_type_comment: Optional[str], +) -> Optional["ast.FunctionType"]: + if func_type_comment is None: + return None + return ast.parse(func_type_comment, "", "func_type") + + +@functools.lru_cache() +def _builtins() -> Set[str]: + return set(dir(builtins)) + + +def _is_builtin(annotation: str) -> bool: + return annotation in _builtins() + + +def _convert_annotation( + raw: str, + quote_annotations: bool, +) -> cst.Annotation: + """ + Convert a raw annotation - which is a string coming from a type + comment - into a suitable libcst Annotation node. + + If `quote_annotations`, we'll always quote annotations unless they are builtin + types. The reason for this is to make the codemod safer to apply + on legacy code where type comments may well include invalid types + that would crash at runtime. + """ + if _is_builtin(raw): + return cst.Annotation(annotation=cst.Name(value=raw)) + if not quote_annotations: + try: + return cst.Annotation(annotation=cst.parse_expression(raw)) + except cst.ParserSyntaxError: + pass + return cst.Annotation(annotation=cst.SimpleString(f'"{raw}"')) + + +def _is_type_comment(comment: Optional[cst.Comment]) -> bool: + """ + Determine whether a comment is a type comment. + + Unfortunately, to strip type comments in a location-invariant way requires + finding them from pure libcst data. We only use this in function defs, where + the precise cst location of the type comment cna be hard to predict. 
+ """ + if comment is None: + return False + value = comment.value[1:].strip() + if not value.startswith("type:"): + return False + suffix = value.removeprefix("type:").strip().split() + if len(suffix) > 0 and suffix[0] == "ignore": + return False + return True + + +def _strip_type_comment(comment: Optional[cst.Comment]) -> Optional[cst.Comment]: + """ + Remove the type comment while keeping any following comments. + """ + if not _is_type_comment(comment): + return comment + assert comment is not None + idx = comment.value.find("#", 1) + if idx < 0: + return None + return comment.with_changes(value=comment.value[idx:]) + + +class _FailedToApplyAnnotation: + pass + + +class _ArityError(Exception): + pass + + +UnpackedBindings = Union[cst.BaseExpression, List["UnpackedBindings"]] +UnpackedAnnotations = Union[str, List["UnpackedAnnotations"]] +TargetAnnotationPair = Tuple[cst.BaseExpression, str] + + +class AnnotationSpreader: + """ + Utilities to help with lining up tuples of types from type comments with + the tuples of values with which they should be associated. + """ + + @staticmethod + def unpack_annotation( + expression: ast.expr, + ) -> UnpackedAnnotations: + if isinstance(expression, ast.Tuple): + return [ + AnnotationSpreader.unpack_annotation(elt) for elt in expression.elts + ] + else: + return ast.unparse(expression) + + @staticmethod + def unpack_target( + target: cst.BaseExpression, + ) -> UnpackedBindings: + """ + Take a (non-function-type) type comment and split it into + components. A type comment body should always be either a single + type or a tuple of types. + + We work with strings for annotations because without detailed scope + analysis that is the safest option for codemods. 
+ """ + if isinstance(target, cst.Tuple): + return [ + AnnotationSpreader.unpack_target(element.value) + for element in target.elements + ] + else: + return target + + @staticmethod + def annotated_bindings( + bindings: UnpackedBindings, + annotations: UnpackedAnnotations, + ) -> List[Tuple[cst.BaseAssignTargetExpression, str]]: + if isinstance(annotations, list): + if isinstance(bindings, list) and len(bindings) == len(annotations): + # The arities match, so we return the flattened result of + # mapping annotated_bindings over each pair. + out: List[Tuple[cst.BaseAssignTargetExpression, str]] = [] + for binding, annotation in zip(bindings, annotations): + out.extend( + AnnotationSpreader.annotated_bindings(binding, annotation) + ) + return out + else: + # Either mismatched lengths, or multi-type and one-target + raise _ArityError() + elif isinstance(bindings, list): + # multi-target and one-type + raise _ArityError() + else: + assert isinstance(bindings, cst.BaseAssignTargetExpression) + return [(bindings, annotations)] + + @staticmethod + def type_declaration( + binding: cst.BaseAssignTargetExpression, + raw_annotation: str, + quote_annotations: bool, + ) -> cst.AnnAssign: + return cst.AnnAssign( + target=binding, + annotation=_convert_annotation( + raw=raw_annotation, + quote_annotations=quote_annotations, + ), + value=None, + ) + + @staticmethod + def type_declaration_statements( + bindings: UnpackedBindings, + annotations: UnpackedAnnotations, + leading_lines: Sequence[cst.EmptyLine], + quote_annotations: bool, + ) -> List[cst.SimpleStatementLine]: + return [ + cst.SimpleStatementLine( + body=[ + AnnotationSpreader.type_declaration( + binding=binding, + raw_annotation=raw_annotation, + quote_annotations=quote_annotations, + ) + ], + leading_lines=leading_lines if i == 0 else [], + ) + for i, (binding, raw_annotation) in enumerate( + AnnotationSpreader.annotated_bindings( + bindings=bindings, + annotations=annotations, + ) + ) + ] + + +def convert_Assign( + 
node: cst.Assign, + annotation: ast.expr, + quote_annotations: bool, +) -> Union[ + _FailedToApplyAnnotation, + cst.AnnAssign, + List[Union[cst.AnnAssign, cst.Assign]], +]: + # zip the type and target information tother. If there are mismatched + # arities, this is a PEP 484 violation (technically we could use + # logic beyond the PEP to recover some cases as typing.Tuple, but this + # should be rare) so we give up. + try: + annotations = AnnotationSpreader.unpack_annotation(annotation) + annotated_targets = [ + AnnotationSpreader.annotated_bindings( + bindings=AnnotationSpreader.unpack_target(target.target), + annotations=annotations, + ) + for target in node.targets + ] + except _ArityError: + return _FailedToApplyAnnotation() + if len(annotated_targets) == 1 and len(annotated_targets[0]) == 1: + # We can convert simple one-target assignments into a single AnnAssign + binding, raw_annotation = annotated_targets[0][0] + return cst.AnnAssign( + target=binding, + annotation=_convert_annotation( + raw=raw_annotation, + quote_annotations=quote_annotations, + ), + value=node.value, + semicolon=node.semicolon, + ) + else: + # For multi-target assigns (regardless of whether they are using tuples + # on the LHS or multiple `=` tokens or both), we need to add a type + # declaration per individual LHS target. 
+ type_declarations = [ + AnnotationSpreader.type_declaration( + binding, + raw_annotation, + quote_annotations=quote_annotations, + ) + for annotated_bindings in annotated_targets + for binding, raw_annotation in annotated_bindings + ] + return [ + *type_declarations, + node, + ] + + +@dataclasses.dataclass(frozen=True) +class FunctionTypeInfo: + arguments: Dict[str, Optional[str]] + returns: Optional[str] + + def is_empty(self) -> bool: + return self.returns is None and self.arguments == {} + + @classmethod + def from_cst( + cls, + node_cst: cst.FunctionDef, + is_method: bool, + ) -> "FunctionTypeInfo": + """ + Using the `ast` type comment extraction logic, get type information + for a function definition. + + To understand edge case behavior see the `leave_FunctionDef` docstring. + """ + node_ast = cast(ast.FunctionDef, _ast_for_statement(node_cst)) + # Note: this is guaranteed to have the correct arity. + args = [ + *node_ast.args.posonlyargs, + *node_ast.args.args, + *( + [] + if node_ast.args.vararg is None + else [ + node_ast.args.vararg, + ] + ), + *node_ast.args.kwonlyargs, + *( + [] + if node_ast.args.kwarg is None + else [ + node_ast.args.kwarg, + ] + ), + ] + try: + func_type_annotation = _parse_func_type_comment(node_ast.type_comment) + except SyntaxError: + # On unparsable function type annotations, ignore type information + return cls({}, None) + if func_type_annotation is None: + return cls( + arguments={ + arg.arg: arg.type_comment + for arg in args + if arg.type_comment is not None + }, + returns=None, + ) + else: + argtypes = func_type_annotation.argtypes + returns = ast.unparse(func_type_annotation.returns) + if ( + len(argtypes) == 1 + and isinstance(argtypes[0], ast.Constant) + # pyre-ignore [16] Pyre cannot refine constant indexes (yet!) + and argtypes[0].value is Ellipsis + ): + # Only use the return type if the comment was like `(...) 
-> R` + return cls( + arguments={arg.arg: arg.type_comment for arg in args}, + returns=returns, + ) + elif len(argtypes) == len(args): + # Merge the type comments, preferring inline comments where available + return cls( + arguments={ + arg.arg: arg.type_comment or ast.unparse(from_func_type) + for arg, from_func_type in zip(args, argtypes) + }, + returns=returns, + ) + elif is_method and len(argtypes) == len(args) - 1: + # Merge as above, but skip merging the initial `self` or `cls` arg. + return cls( + arguments={ + args[0].arg: args[0].type_comment, + **{ + arg.arg: arg.type_comment or ast.unparse(from_func_type) + for arg, from_func_type in zip(args[1:], argtypes) + }, + }, + returns=returns, + ) + else: + # On arity mismatches, ignore the type information + return cls({}, None) + + +class ConvertTypeComments(VisitorBasedCodemodCommand): + DESCRIPTION = """ + Codemod that converts type comments into Python 3.6+ style + annotations. + + Notes: + - This transform requires using the `ast` module, which is not compatible + with multiprocessing. So you should run using a recent version of python, + and set `--jobs=1` if using `python -m libcst.tool codemod ...` from the + commandline. + - This transform requires capabilities from `ast` that are not available + prior to Python 3.9, so libcst must run on Python 3.9+. The code you are + transforming can by Python 3.6+, this limitation applies only to libcst + itself. + + We can handle type comments in the following statement types: + - Assign + - This is converted into a single AnnAssign when possible + - In more complicated cases it will produce multiple AnnAssign + nodes with no value (i.e. "type declaration" statements) + followed by an Assign + - For and With + - We prepend both of these with type declaration statements. + - FunctionDef + - We apply all the types we can find. 
If we find several: + - We prefer any existing annotations to type comments + - For parameters, we prefer inline type comments to + function-level type comments if we find both. + + We always apply the type comments as quote_annotations annotations, unless + we know that it refers to a builtin. We do not guarantee that + the resulting string annotations would parse, but they should + never cause failures at module import time. + + We attempt to: + - Always strip type comments for statements where we successfully + applied types. + - Never strip type comments for statements where we failed to + apply types. + + There are many edge case possible where the arity of a type + hint (which is either a tuple or a func_type) might not match + the code. In these cases we generally give up: + - For Assign, For, and With, we require that every target of + bindings (e.g. a tuple of names being bound) must have exactly + the same arity as the comment. + - So, for example, we would skip an assignment statement such as + ``x = y, z = 1, 2 # type: int, int`` because the arity + of ``x`` does not match the arity of the hint. + - For FunctionDef, we do *not* check arity of inline parameter + type comments but we do skip the transform if the arity of + the function does not match the function-level comment. + """ + + # Finding the location of a type comment in a FunctionDef is difficult. + # + # As a result, if when visiting a FunctionDef header we are able to + # successfully extrct type information then we aggressively strip type + # comments until we reach the first statement in the body. + # + # Once we get there we have to stop, so that we don't unintentionally remove + # unprocessed type comments. + # + # This state handles tracking everything we need for this. 
+ function_type_info_stack: List[FunctionTypeInfo] + function_body_stack: List[cst.BaseSuite] + aggressively_strip_type_comments: bool + + @staticmethod + def add_args(arg_parser: argparse.ArgumentParser) -> None: + arg_parser.add_argument( + "--no-quote-annotations", + action="store_true", + help=( + "Add unquoted annotations. This leads to prettier code " + + "but possibly more errors if type comments are invalid." + ), + ) + + def __init__( + self, + context: CodemodContext, + no_quote_annotations: bool = False, + ) -> None: + if (sys.version_info.major, sys.version_info.minor) < (3, 9): + # The ast module did not get `unparse` until Python 3.9, + # or `type_comments` until Python 3.8 + # + # For earlier versions of python, raise early instead of failing + # later. It might be possible to use libcst parsing and the + # typed_ast library to support earlier python versions, but this is + # not a high priority. + raise NotImplementedError( + "You are trying to run ConvertTypeComments, but libcst " + + "needs to be running with Python 3.9+ in order to " + + "do this. Try using Python 3.9+ to run your codemod. " + + "Note that the target code can be using Python 3.6+, " + + "it is only libcst that needs a new Python version." + ) + super().__init__(context) + # flags used to control overall behavior + self.quote_annotations: bool = not no_quote_annotations + # state used to manage how we traverse nodes in various contexts + self.function_type_info_stack = [] + self.function_body_stack = [] + self.aggressively_strip_type_comments = False + + def _strip_TrailingWhitespace( + self, + node: cst.TrailingWhitespace, + ) -> cst.TrailingWhitespace: + trailing_comment = _strip_type_comment(node.comment) + if trailing_comment is not None: + return node.with_changes(comment=trailing_comment) + return node.with_changes( + whitespace=cst.SimpleWhitespace( + "" + ), # any whitespace came before the comment, so strip it. 
+ comment=None, + ) + + def leave_SimpleStatementLine( + self, + original_node: cst.SimpleStatementLine, + updated_node: cst.SimpleStatementLine, + ) -> Union[cst.SimpleStatementLine, cst.FlattenSentinel]: + """ + Convert any SimpleStatementLine containing an Assign with a + type comment into a one that uses a PEP 526 AnnAssign. + """ + # determine whether to apply an annotation + assign = updated_node.body[-1] + if not isinstance(assign, cst.Assign): # only Assign matters + return updated_node + annotation = _annotation_for_statement(original_node) + if annotation is None: + return updated_node + # At this point have a single-line Assign with a type comment. + # Convert it to an AnnAssign and strip the comment. + converted = convert_Assign( + node=assign, + annotation=annotation, + quote_annotations=self.quote_annotations, + ) + if isinstance(converted, _FailedToApplyAnnotation): + # We were unable to consume the type comment, so return the + # original code unchanged. + # TODO: allow stripping the invalid type comments via a flag + return updated_node + elif isinstance(converted, cst.AnnAssign): + # We were able to convert the Assign into an AnnAssign, so + # we can update the node. + return updated_node.with_changes( + body=[*updated_node.body[:-1], converted], + trailing_whitespace=self._strip_TrailingWhitespace( + updated_node.trailing_whitespace, + ), + ) + elif isinstance(converted, list): + # We need to inject two or more type declarations. + # + # In this case, we need to split across multiple lines, and + # this also means we'll spread any multi-statement lines out + # (multi-statement lines are PEP 8 violating anyway). + # + # We still preserve leading lines from before our transform. 
+ new_statements = [ + *( + statement.with_changes( + semicolon=cst.MaybeSentinel.DEFAULT, + ) + for statement in updated_node.body[:-1] + ), + *converted, + ] + if len(new_statements) < 2: + raise RuntimeError("Unreachable code.") + return cst.FlattenSentinel( + [ + updated_node.with_changes( + body=[new_statements[0]], + trailing_whitespace=self._strip_TrailingWhitespace( + updated_node.trailing_whitespace, + ), + ), + *( + cst.SimpleStatementLine(body=[statement]) + for statement in new_statements[1:] + ), + ] + ) + else: + raise RuntimeError(f"Unhandled value {converted}") + + def leave_For( + self, + original_node: cst.For, + updated_node: cst.For, + ) -> Union[cst.For, cst.FlattenSentinel]: + """ + Convert a For with a type hint on the bound variable(s) to + use type declarations. + """ + # Type comments are only possible when the body is an indented + # block, and we need this refinement to work with the header, + # so we check and only then extract the type comment. + body = updated_node.body + if not isinstance(body, cst.IndentedBlock): + return updated_node + annotation = _annotation_for_statement(original_node) + if annotation is None: + return updated_node + # Zip up the type hint and the bindings. If we hit an arity + # error, abort. 
+ try: + type_declarations = AnnotationSpreader.type_declaration_statements( + bindings=AnnotationSpreader.unpack_target(updated_node.target), + annotations=AnnotationSpreader.unpack_annotation(annotation), + leading_lines=updated_node.leading_lines, + quote_annotations=self.quote_annotations, + ) + except _ArityError: + return updated_node + # There is no arity error, so we can add the type delaration(s) + return cst.FlattenSentinel( + [ + *type_declarations, + updated_node.with_changes( + body=body.with_changes( + header=self._strip_TrailingWhitespace(body.header) + ), + leading_lines=[], + ), + ] + ) + + def leave_With( + self, + original_node: cst.With, + updated_node: cst.With, + ) -> Union[cst.With, cst.FlattenSentinel]: + """ + Convert a With with a type hint on the bound variable(s) to + use type declarations. + """ + # Type comments are only possible when the body is an indented + # block, and we need this refinement to work with the header, + # so we check and only then extract the type comment. + body = updated_node.body + if not isinstance(body, cst.IndentedBlock): + return updated_node + annotation = _annotation_for_statement(original_node) + if annotation is None: + return updated_node + # PEP 484 does not attempt to specify type comment semantics for + # multiple with bindings (there's more than one sensible way to + # do it), so we make no attempt to handle this + targets = [ + item.asname.name for item in updated_node.items if item.asname is not None + ] + if len(targets) != 1: + return updated_node + target = targets[0] + # Zip up the type hint and the bindings. If we hit an arity + # error, abort. 
+ try: + type_declarations = AnnotationSpreader.type_declaration_statements( + bindings=AnnotationSpreader.unpack_target(target), + annotations=AnnotationSpreader.unpack_annotation(annotation), + leading_lines=updated_node.leading_lines, + quote_annotations=self.quote_annotations, + ) + except _ArityError: + return updated_node + # There is no arity error, so we can add the type delaration(s) + return cst.FlattenSentinel( + [ + *type_declarations, + updated_node.with_changes( + body=body.with_changes( + header=self._strip_TrailingWhitespace(body.header) + ), + leading_lines=[], + ), + ] + ) + + # Handle function definitions ------------------------- + + # **Implementation Notes** + # + # It is much harder to predict where exactly type comments will live + # in function definitions than in Assign / For / With. + # + # As a result, we use two different patterns: + # (A) we aggressively strip out type comments from whitespace between the + # start of a function define and the start of the body, whenever we were + # able to extract type information. This is done via mutable state and the + # usual visitor pattern. + # (B) we also manually reach down to the first statement inside of the + # function body and aggressively strip type comments from leading + # whitespaces + # + # PEP 484 underspecifies how to apply type comments to (non-static) + # methods - it would be possible to provide a type for `self`, or to omit + # it. So we accept either approach when interpreting type comments on + # non-static methods: the first argument an have a type provided or not. + + def _visit_FunctionDef( + self, + node: cst.FunctionDef, + is_method: bool, + ) -> None: + """ + Set up the data we need to handle function definitions: + - Parse the type comments. + - Store the resulting function type info on the stack, where it will + remain until we use it in `leave_FunctionDef` + - Set that we are aggressively stripping type comments, which will + remain true until we visit the body. 
+ """ + function_type_info = FunctionTypeInfo.from_cst(node, is_method=is_method) + self.aggressively_strip_type_comments = not function_type_info.is_empty() + self.function_type_info_stack.append(function_type_info) + self.function_body_stack.append(node.body) + + @m.call_if_not_inside(m.ClassDef()) + @m.visit(m.FunctionDef()) + def visit_method( + self, + node: cst.FunctionDef, + ) -> None: + return self._visit_FunctionDef( + node=node, + is_method=False, + ) + + @m.call_if_inside(m.ClassDef()) + @m.visit(m.FunctionDef()) + def visit_function( + self, + node: cst.FunctionDef, + ) -> None: + return self._visit_FunctionDef( + node=node, + is_method=not any( + m.matches(d.decorator, m.Name("staticmethod")) for d in node.decorators + ), + ) + + def leave_TrailingWhitespace( + self, + original_node: cst.TrailingWhitespace, + updated_node: cst.TrailingWhitespace, + ) -> Union[cst.TrailingWhitespace]: + "Aggressively remove type comments when in header if we extracted types." + if self.aggressively_strip_type_comments and _is_type_comment( + updated_node.comment + ): + return cst.TrailingWhitespace() + else: + return updated_node + + def leave_EmptyLine( + self, + original_node: cst.EmptyLine, + updated_node: cst.EmptyLine, + ) -> Union[cst.EmptyLine, cst.RemovalSentinel]: + "Aggressively remove type comments when in header if we extracted types." + if self.aggressively_strip_type_comments and _is_type_comment( + updated_node.comment + ): + return cst.RemovalSentinel.REMOVE + else: + return updated_node + + def visit_FunctionDef_body( + self, + node: cst.FunctionDef, + ) -> None: + "Turn off aggressive type comment removal when we've left the header." + self.aggressively_strip_type_comments = False + + def leave_IndentedBlock( + self, + original_node: cst.IndentedBlock, + updated_node: cst.IndentedBlock, + ) -> cst.IndentedBlock: + "When appropriate, strip function type comment from the function body." 
+ # abort unless this is the body of a function we are transforming + if len(self.function_body_stack) == 0: + return updated_node + if original_node is not self.function_body_stack[-1]: + return updated_node + if self.function_type_info_stack[-1].is_empty(): + return updated_node + # The comment will be in the body header if it was on the same line + # as the colon. + if _is_type_comment(updated_node.header.comment): + updated_node = updated_node.with_changes( + header=cst.TrailingWhitespace(), + ) + # The comment will be in a leading line of the first body statement + # if it was on the first line after the colon. + first_statement = updated_node.body[0] + if not hasattr(first_statement, "leading_lines"): + return updated_node + return updated_node.with_changes( + body=[ + first_statement.with_changes( + leading_lines=[ + line + # pyre-ignore[16]: we refined via `hasattr` + for line in first_statement.leading_lines + if not _is_type_comment(line.comment) + ] + ), + *updated_node.body[1:], + ] + ) + + # Methods for adding type annotations ---- + # + # By the time we get here, all type comments should already be stripped. 
+ + def leave_Param( + self, + original_node: cst.Param, + updated_node: cst.Param, + ) -> cst.Param: + # ignore type comments if there's already an annotation + if updated_node.annotation is not None: + return updated_node + # find out if there's a type comment and apply it if so + function_type_info = self.function_type_info_stack[-1] + raw_annotation = function_type_info.arguments.get(updated_node.name.value) + if raw_annotation is not None: + return updated_node.with_changes( + annotation=_convert_annotation( + raw=raw_annotation, + quote_annotations=self.quote_annotations, + ) + ) + else: + return updated_node + + def leave_FunctionDef( + self, + original_node: cst.FunctionDef, + updated_node: cst.FunctionDef, + ) -> cst.FunctionDef: + self.function_body_stack.pop() + function_type_info = self.function_type_info_stack.pop() + if updated_node.returns is None and function_type_info.returns is not None: + return updated_node.with_changes( + returns=_convert_annotation( + raw=function_type_info.returns, + quote_annotations=self.quote_annotations, + ) + ) + else: + return updated_node + + def visit_Lambda( + self, + node: cst.Lambda, + ) -> bool: + """ + Disable traversing under lambdas. They don't have any statements + nested inside them so there's no need, and they do have Params which + we don't want to transform. + """ + return False diff --git a/libcst/codemod/commands/convert_union_to_or.py b/libcst/codemod/commands/convert_union_to_or.py new file mode 100644 index 00000000..96a64314 --- /dev/null +++ b/libcst/codemod/commands/convert_union_to_or.py @@ -0,0 +1,56 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# +# pyre-strict + +import libcst as cst +from libcst.codemod import VisitorBasedCodemodCommand +from libcst.codemod.visitors import RemoveImportsVisitor +from libcst.metadata import QualifiedName, QualifiedNameProvider, QualifiedNameSource + + +class ConvertUnionToOrCommand(VisitorBasedCodemodCommand): + DESCRIPTION: str = "Convert `Union[A, B]` to `A | B` in Python 3.10+" + + METADATA_DEPENDENCIES = (QualifiedNameProvider,) + + def leave_Subscript( + self, original_node: cst.Subscript, updated_node: cst.Subscript + ) -> cst.BaseExpression: + """ + Given a subscript, check if it's a Union - if so, either flatten the members + into a nested BitOr (if multiple members) or unwrap the type (if only one member). + """ + if not QualifiedNameProvider.has_name( + self, + original_node, + QualifiedName(name="typing.Union", source=QualifiedNameSource.IMPORT), + ): + return updated_node + types = [ + cst.ensure_type( + cst.ensure_type(s, cst.SubscriptElement).slice, cst.Index + ).value + for s in updated_node.slice + ] + if len(types) == 1: + return types[0] + else: + replacement = cst.BinaryOperation( + left=types[0], right=types[1], operator=cst.BitOr() + ) + for type_ in types[2:]: + replacement = cst.BinaryOperation( + left=replacement, right=type_, operator=cst.BitOr() + ) + return replacement + + def leave_Module( + self, original_node: cst.Module, updated_node: cst.Module + ) -> cst.Module: + RemoveImportsVisitor.remove_unused_import( + self.context, module="typing", obj="Union" + ) + return updated_node diff --git a/libcst/codemod/commands/ensure_import_present.py b/libcst/codemod/commands/ensure_import_present.py index c2ec033f..44dda822 100644 --- a/libcst/codemod/commands/ensure_import_present.py +++ b/libcst/codemod/commands/ensure_import_present.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -11,15 +11,14 @@ from libcst.codemod.visitors import AddImportsVisitor class EnsureImportPresentCommand(MagicArgsCodemodCommand): - DESCRIPTION: str = ( "Given a module and possibly an entity in that module, add an import " + "as long as one does not already exist." ) @staticmethod - def add_args(parser: argparse.ArgumentParser) -> None: - parser.add_argument( + def add_args(arg_parser: argparse.ArgumentParser) -> None: + arg_parser.add_argument( "--module", dest="module", metavar="MODULE", @@ -27,7 +26,7 @@ class EnsureImportPresentCommand(MagicArgsCodemodCommand): type=str, required=True, ) - parser.add_argument( + arg_parser.add_argument( "--entity", dest="entity", metavar="ENTITY", @@ -38,7 +37,7 @@ class EnsureImportPresentCommand(MagicArgsCodemodCommand): type=str, default=None, ) - parser.add_argument( + arg_parser.add_argument( "--alias", dest="alias", metavar="ALIAS", diff --git a/libcst/codemod/commands/fix_pyre_directives.py b/libcst/codemod/commands/fix_pyre_directives.py index b5310d8b..a9779d0f 100644 --- a/libcst/codemod/commands/fix_pyre_directives.py +++ b/libcst/codemod/commands/fix_pyre_directives.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -7,6 +7,7 @@ from typing import Dict, Sequence, Union import libcst import libcst.matchers as m +from libcst import CSTLogicError from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand from libcst.helpers import insert_header_comments @@ -29,12 +30,12 @@ class FixPyreDirectivesCommand(VisitorBasedCodemodCommand): def visit_Module_header(self, node: libcst.Module) -> None: if self.in_module_header: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") self.in_module_header = True def leave_Module_header(self, node: libcst.Module) -> None: if not self.in_module_header: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") self.in_module_header = False def leave_EmptyLine( diff --git a/libcst/codemod/commands/fix_variadic_callable.py b/libcst/codemod/commands/fix_variadic_callable.py new file mode 100644 index 00000000..85cb0aa0 --- /dev/null +++ b/libcst/codemod/commands/fix_variadic_callable.py @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+#
+# pyre-strict
+
+import libcst as cst
+import libcst.matchers as m
+from libcst.codemod import VisitorBasedCodemodCommand
+from libcst.metadata import QualifiedName, QualifiedNameProvider, QualifiedNameSource
+
+
+class FixVariadicCallableCommmand(VisitorBasedCodemodCommand):
+    DESCRIPTION: str = (
+        "Fix incorrect variadic callable type annotations from `Callable[[...], T]` to `Callable[..., T]`"
+    )
+
+    METADATA_DEPENDENCIES = (QualifiedNameProvider,)
+
+    def leave_Subscript(
+        self, original_node: cst.Subscript, updated_node: cst.Subscript
+    ) -> cst.BaseExpression:
+        if QualifiedNameProvider.has_name(
+            self,
+            original_node,
+            QualifiedName(name="typing.Callable", source=QualifiedNameSource.IMPORT),
+        ):
+            node_matches = len(updated_node.slice) == 2 and m.matches(
+                updated_node.slice[0],
+                m.SubscriptElement(
+                    slice=m.Index(value=m.List(elements=[m.Element(m.Ellipsis())]))
+                ),
+            )
+
+            if node_matches:
+                slices = list(updated_node.slice)
+                slices[0] = cst.SubscriptElement(cst.Index(cst.Ellipsis()))
+                return updated_node.with_changes(slice=slices)
+        return updated_node
diff --git a/libcst/codemod/commands/noop.py b/libcst/codemod/commands/noop.py
index eef1d897..23ea0a17 100644
--- a/libcst/codemod/commands/noop.py
+++ b/libcst/codemod/commands/noop.py
@@ -1,4 +1,4 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
 #
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
diff --git a/libcst/codemod/commands/remove_pyre_directive.py b/libcst/codemod/commands/remove_pyre_directive.py
index a9d38506..01bf89a6 100644
--- a/libcst/codemod/commands/remove_pyre_directive.py
+++ b/libcst/codemod/commands/remove_pyre_directive.py
@@ -1,4 +1,4 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/remove_unused_imports.py b/libcst/codemod/commands/remove_unused_imports.py index 741f9a46..2e6beafa 100644 --- a/libcst/codemod/commands/remove_unused_imports.py +++ b/libcst/codemod/commands/remove_unused_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,10 +9,9 @@ from typing import Set, Tuple, Union from libcst import Import, ImportFrom, ImportStar, Module from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand from libcst.codemod.visitors import GatherCommentsVisitor, RemoveImportsVisitor -from libcst.helpers import get_absolute_module_for_import +from libcst.helpers import get_absolute_module_from_package_for_import from libcst.metadata import PositionProvider, ProviderT - DEFAULT_SUPPRESS_COMMENT_REGEX = ( r".*\W(noqa|lint-ignore: ?unused-import|lint-ignore: ?F401)(\W.*)?$" ) @@ -75,8 +74,8 @@ class RemoveUnusedImportsCommand(VisitorBasedCodemodCommand): asname=alias.evaluated_alias, ) else: - module_name = get_absolute_module_for_import( - self.context.full_module_name, node + module_name = get_absolute_module_from_package_for_import( + self.context.full_package_name, node ) if module_name is None: raise ValueError( diff --git a/libcst/codemod/commands/rename.py b/libcst/codemod/commands/rename.py index 03d5ddef..f3accdcd 100644 --- a/libcst/codemod/commands/rename.py +++ b/libcst/codemod/commands/rename.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -15,7 +15,7 @@ from libcst.metadata import QualifiedNameProvider def leave_import_decorator( - method: Callable[..., Union[cst.Import, cst.ImportFrom]] + method: Callable[..., Union[cst.Import, cst.ImportFrom]], ) -> Callable[..., Union[cst.Import, cst.ImportFrom]]: # We want to record any 'as name' that is relevant but only after we leave the corresponding Import/ImportFrom node since # we don't want the 'as name' to interfere with children 'Name' and 'Attribute' nodes. @@ -42,15 +42,15 @@ class RenameCommand(VisitorBasedCodemodCommand): METADATA_DEPENDENCIES = (QualifiedNameProvider,) @staticmethod - def add_args(parser: argparse.ArgumentParser) -> None: - parser.add_argument( + def add_args(arg_parser: argparse.ArgumentParser) -> None: + arg_parser.add_argument( "--old_name", dest="old_name", required=True, help="Full dotted name of object to rename. Eg: `foo.bar.baz`", ) - parser.add_argument( + arg_parser.add_argument( "--new_name", dest="new_name", required=True, @@ -92,14 +92,43 @@ class RenameCommand(VisitorBasedCodemodCommand): self.old_module: str = old_module self.old_mod_or_obj: str = old_mod_or_obj - self.as_name: Optional[Tuple[str, str]] = None + @property + def as_name(self) -> Optional[Tuple[str, str]]: + if "as_name" not in self.context.scratch: + self.context.scratch["as_name"] = None + return self.context.scratch["as_name"] - # A set of nodes that have been renamed to help with the cleanup of now potentially unused - # imports, during import cleanup in `leave_Module`. - self.scheduled_removals: Set[cst.CSTNode] = set() - # If an import has been renamed while inside an `Import` or `ImportFrom` node, we want to flag - # this so that we do not end up with two of the same import. 
- self.bypass_import = False + @as_name.setter + def as_name(self, value: Optional[Tuple[str, str]]) -> None: + self.context.scratch["as_name"] = value + + @property + def scheduled_removals( + self, + ) -> Set[Union[cst.CSTNode, Tuple[str, Optional[str], Optional[str]]]]: + """A set of nodes that have been renamed to help with the cleanup of now potentially unused + imports, during import cleanup in `leave_Module`. Can also contain tuples that can be passed + directly to RemoveImportsVisitor.remove_unused_import().""" + if "scheduled_removals" not in self.context.scratch: + self.context.scratch["scheduled_removals"] = set() + return self.context.scratch["scheduled_removals"] + + @scheduled_removals.setter + def scheduled_removals( + self, value: Set[Union[cst.CSTNode, Tuple[str, Optional[str], Optional[str]]]] + ) -> None: + self.context.scratch["scheduled_removals"] = value + + @property + def bypass_import(self) -> bool: + """A flag to indicate that an import has been renamed while inside an `Import` or `ImportFrom` node.""" + if "bypass_import" not in self.context.scratch: + self.context.scratch["bypass_import"] = False + return self.context.scratch["bypass_import"] + + @bypass_import.setter + def bypass_import(self, value: bool) -> None: + self.context.scratch["bypass_import"] = value def visit_Import(self, node: cst.Import) -> None: for import_alias in node.names: @@ -118,40 +147,42 @@ class RenameCommand(VisitorBasedCodemodCommand): ) -> cst.Import: new_names = [] for import_alias in updated_node.names: + # We keep the original import_alias here in case it's used by other symbols. + # It will be removed later in RemoveImportsVisitor if it's unused. 
+ new_names.append(import_alias) import_alias_name = import_alias.name import_alias_full_name = get_full_name_for_node(import_alias_name) if import_alias_full_name is None: - raise Exception("Could not parse full name for ImportAlias.name node.") + raise ValueError("Could not parse full name for ImportAlias.name node.") - if isinstance(import_alias_name, cst.Name) and self.old_name.startswith( - import_alias_full_name + "." + if self.old_name.startswith(import_alias_full_name + "."): + replacement_module = self.gen_replacement_module(import_alias_full_name) + if not replacement_module: + # here import_alias_full_name isn't an exact match for old_name + # don't add an import here, it will be handled either in more + # specific import aliases or at the very end + continue + self.bypass_import = True + if replacement_module != import_alias_full_name: + self.scheduled_removals.add(original_node) + new_name_node: Union[cst.Attribute, cst.Name] = ( + self.gen_name_or_attr_node(replacement_module) + ) + new_names.append(cst.ImportAlias(name=new_name_node)) + elif ( + import_alias_full_name == self.new_name + and import_alias.asname is not None ): - # Might, be in use elsewhere in the code, so schedule a potential removal, and add another alias. - new_names.append(import_alias) - self.scheduled_removals.add(original_node) - new_names.append( - cst.ImportAlias( - name=cst.Name( - value=self.gen_replacement_module(import_alias_full_name) - ) + self.bypass_import = True + # Add removal tuple instead of calling directly + self.scheduled_removals.add( + ( + import_alias.evaluated_name, + None, + import_alias.evaluated_alias, ) ) - self.bypass_import = True - elif isinstance( - import_alias_name, cst.Attribute - ) and self.old_name.startswith(import_alias_full_name + "."): - # Same idea as above. 
- new_names.append(import_alias) - self.scheduled_removals.add(original_node) - new_name_node: Union[ - cst.Attribute, cst.Name - ] = self.gen_name_or_attr_node( - self.gen_replacement_module(import_alias_full_name) - ) - new_names.append(cst.ImportAlias(name=new_name_node)) - self.bypass_import = True - else: - new_names.append(import_alias) + new_names.append(import_alias.with_changes(asname=None)) return updated_node.with_changes(names=new_names) @@ -183,13 +214,12 @@ class RenameCommand(VisitorBasedCodemodCommand): return updated_node else: - new_names = [] + new_names: list[cst.ImportAlias] = [] for import_alias in names: alias_name = get_full_name_for_node(import_alias.name) if alias_name is not None: qual_name = f"{imported_module_name}.{alias_name}" if self.old_name == qual_name: - replacement_module = self.gen_replacement_module( imported_module_name ) @@ -201,16 +231,16 @@ class RenameCommand(VisitorBasedCodemodCommand): self.scheduled_removals.add(original_node) continue - new_import_alias_name: Union[ - cst.Attribute, cst.Name - ] = self.gen_name_or_attr_node(replacement_obj) + new_import_alias_name: Union[cst.Attribute, cst.Name] = ( + self.gen_name_or_attr_node(replacement_obj) + ) # Rename on the spot only if this is the only imported name under the module. if len(names) == 1: - self.bypass_import = True - return updated_node.with_changes( + updated_node = updated_node.with_changes( module=cst.parse_expression(replacement_module), - names=(cst.ImportAlias(name=new_import_alias_name),), ) + self.scheduled_removals.add(updated_node) + new_names.append(import_alias) # Or if the module name is to stay the same. elif replacement_module == imported_module_name: self.bypass_import = True @@ -222,6 +252,10 @@ class RenameCommand(VisitorBasedCodemodCommand): # This import might be in use elsewhere in the code, so schedule a potential removal. 
self.scheduled_removals.add(original_node) new_names.append(import_alias) + if isinstance(new_names[-1].comma, cst.Comma) and updated_node.rpar is None: + new_names[-1] = new_names[-1].with_changes( + comma=cst.MaybeSentinel.DEFAULT + ) return updated_node.with_changes(names=new_names) return updated_node @@ -252,29 +286,30 @@ class RenameCommand(VisitorBasedCodemodCommand): ) -> Union[cst.Name, cst.Attribute]: full_name_for_node = get_full_name_for_node(original_node) if full_name_for_node is None: - raise Exception("Could not parse full name for Attribute node.") + raise ValueError("Could not parse full name for Attribute node.") full_replacement_name = self.gen_replacement(full_name_for_node) # If a node has no associated QualifiedName, we are still inside an import statement. inside_import_statement: bool = not self.get_metadata( QualifiedNameProvider, original_node, set() ) - if ( - QualifiedNameProvider.has_name( - self, - original_node, - self.old_name, - ) - or (inside_import_statement and full_replacement_name == self.new_name) - ): + if QualifiedNameProvider.has_name( + self, + original_node, + self.old_name, + ) or (inside_import_statement and full_replacement_name == self.new_name): new_value, new_attr = self.new_module, self.new_mod_or_obj if not inside_import_statement: self.scheduled_removals.add(original_node.value) if full_replacement_name == self.new_name: - return updated_node.with_changes( - value=cst.parse_expression(new_value), - attr=cst.Name(value=new_attr.rstrip(".")), - ) + value = cst.parse_expression(new_value) + if new_attr: + return updated_node.with_changes( + value=value, + attr=cst.Name(value=new_attr.rstrip(".")), + ) + assert isinstance(value, (cst.Name, cst.Attribute)) + return value return self.gen_name_or_attr_node(new_attr) @@ -283,14 +318,17 @@ class RenameCommand(VisitorBasedCodemodCommand): def leave_Module( self, original_node: cst.Module, updated_node: cst.Module ) -> cst.Module: - for removal_node in 
self.scheduled_removals: - RemoveImportsVisitor.remove_unused_import_by_node( - self.context, removal_node - ) + for removal in self.scheduled_removals: + if isinstance(removal, tuple): + RemoveImportsVisitor.remove_unused_import( + self.context, removal[0], removal[1], removal[2] + ) + else: + RemoveImportsVisitor.remove_unused_import_by_node(self.context, removal) # If bypass_import is False, we know that no import statements were directly renamed, and the fact # that we have any `self.scheduled_removals` tells us we encountered a matching `old_name` in the code. if not self.bypass_import and self.scheduled_removals: - if self.new_module: + if self.new_module and self.new_module != "builtins": new_obj: Optional[str] = ( self.new_mod_or_obj.split(".")[0] if self.new_mod_or_obj else None ) @@ -309,10 +347,14 @@ class RenameCommand(VisitorBasedCodemodCommand): module_as_name[0] + ".", module_as_name[1] + ".", 1 ) - if original_name == self.old_mod_or_obj: + if self.old_module and original_name == self.old_mod_or_obj: return self.new_mod_or_obj - elif original_name == ".".join([self.old_module, self.old_mod_or_obj]): - return self.new_name + elif original_name == self.old_name: + return ( + self.new_mod_or_obj + if (not self.bypass_import and self.new_mod_or_obj) + else self.new_name + ) elif original_name.endswith("." + self.old_mod_or_obj): return self.new_mod_or_obj else: @@ -326,7 +368,7 @@ class RenameCommand(VisitorBasedCodemodCommand): ) -> Union[cst.Attribute, cst.Name]: name_or_attr_node: cst.BaseExpression = cst.parse_expression(dotted_expression) if not isinstance(name_or_attr_node, (cst.Name, cst.Attribute)): - raise Exception( + raise ValueError( "`parse_expression()` on dotted path returned non-Attribute-or-Name." 
) return name_or_attr_node diff --git a/libcst/codemod/commands/rename_typing_generic_aliases.py b/libcst/codemod/commands/rename_typing_generic_aliases.py new file mode 100644 index 00000000..d6906fe9 --- /dev/null +++ b/libcst/codemod/commands/rename_typing_generic_aliases.py @@ -0,0 +1,37 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# pyre-strict +from functools import partial +from typing import cast, Generator + +from libcst.codemod import Codemod, MagicArgsCodemodCommand +from libcst.codemod.commands.rename import RenameCommand + + +class RenameTypingGenericAliases(MagicArgsCodemodCommand): + DESCRIPTION: str = ( + "Rename typing module aliases of builtin generics in Python 3.9+, for example: `typing.List` -> `list`" + ) + + MAPPING: dict[str, str] = { + "typing.List": "builtins.list", + "typing.Tuple": "builtins.tuple", + "typing.Dict": "builtins.dict", + "typing.FrozenSet": "builtins.frozenset", + "typing.Set": "builtins.set", + "typing.Type": "builtins.type", + } + + def get_transforms(self) -> Generator[type[Codemod], None, None]: + for from_type, to_type in self.MAPPING.items(): + yield cast( + type[Codemod], + partial( + RenameCommand, + old_name=from_type, + new_name=to_type, + ), + ) diff --git a/libcst/codemod/commands/strip_strings_from_types.py b/libcst/codemod/commands/strip_strings_from_types.py index b219a7db..3f0894cd 100644 --- a/libcst/codemod/commands/strip_strings_from_types.py +++ b/libcst/codemod/commands/strip_strings_from_types.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -14,7 +14,6 @@ from libcst.metadata import QualifiedNameProvider class StripStringsCommand(VisitorBasedCodemodCommand): - DESCRIPTION: str = ( "Converts string type annotations to 3.7-compatible forward references." ) @@ -44,8 +43,12 @@ class StripStringsCommand(VisitorBasedCodemodCommand): self, original_node: libcst.SimpleString, updated_node: libcst.SimpleString ) -> Union[libcst.SimpleString, libcst.BaseExpression]: AddImportsVisitor.add_needed_import(self.context, "__future__", "annotations") + evaluated_value = updated_node.evaluated_value # Just use LibCST to evaluate the expression itself, and insert that as the # annotation. - return parse_expression( - updated_node.evaluated_value, config=self.module.config_for_parsing - ) + if isinstance(evaluated_value, str): + return parse_expression( + evaluated_value, config=self.module.config_for_parsing + ) + else: + return updated_node diff --git a/libcst/codemod/commands/tests/__init__.py b/libcst/codemod/commands/tests/__init__.py index 602d2685..aac70d45 100644 --- a/libcst/codemod/commands/tests/__init__.py +++ b/libcst/codemod/commands/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/tests/test_add_pyre_directive.py b/libcst/codemod/commands/tests/test_add_pyre_directive.py index b7fad93d..37e6f2f9 100644 --- a/libcst/codemod/commands/tests/test_add_pyre_directive.py +++ b/libcst/codemod/commands/tests/test_add_pyre_directive.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,7 +8,6 @@ from libcst.codemod.commands.add_pyre_directive import AddPyreUnsafeCommand class TestAddPyreUnsafeCommand(CodemodTest): - TRANSFORM = AddPyreUnsafeCommand def test_add_to_file(self) -> None: diff --git a/libcst/codemod/commands/tests/test_add_trailing_commas.py b/libcst/codemod/commands/tests/test_add_trailing_commas.py new file mode 100644 index 00000000..1df31b69 --- /dev/null +++ b/libcst/codemod/commands/tests/test_add_trailing_commas.py @@ -0,0 +1,90 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +from libcst.codemod import CodemodTest +from libcst.codemod.commands.add_trailing_commas import AddTrailingCommas + + +class AddTrailingCommasTest(CodemodTest): + TRANSFORM = AddTrailingCommas + + def test_transform_defines(self) -> None: + before = """ + def f(x, y): + pass + + """ + after = """ + def f(x, y,): + pass + """ + self.assertCodemod(before, after) + + def test_skip_transforming_defines(self) -> None: + before = """ + # skip defines with no params. + def f0(): + pass + + # skip defines with a single param named `self`. 
+ class Foo: + def __init__(self): + pass + """ + after = before + self.assertCodemod(before, after) + + def test_transform_calls(self) -> None: + before = """ + f(a, b, c) + + g(x=a, y=b, z=c) + """ + after = """ + f(a, b, c,) + + g(x=a, y=b, z=c,) + """ + self.assertCodemod(before, after) + + def test_skip_transforming_calls(self) -> None: + before = """ + # skip empty calls + f() + + # skip calls with one argument + g(a) + g(x=a) + """ + after = before + self.assertCodemod(before, after) + + def test_using_yapf_presets(self) -> None: + before = """ + def f(x): # skip single parameters for yapf + pass + + def g(x, y): + pass + """ + after = """ + def f(x): # skip single parameters for yapf + pass + + def g(x, y,): + pass + """ + self.assertCodemod(before, after, formatter="yapf") + + def test_using_custom_presets(self) -> None: + before = """ + def f(x, y, z): + pass + + f(5, 6, 7) + """ + after = before + self.assertCodemod(before, after, parameter_count=4, argument_count=4) diff --git a/libcst/codemod/commands/tests/test_convert_format_to_fstring.py b/libcst/codemod/commands/tests/test_convert_format_to_fstring.py index 35a7aece..1a10303b 100644 --- a/libcst/codemod/commands/tests/test_convert_format_to_fstring.py +++ b/libcst/codemod/commands/tests/test_convert_format_to_fstring.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,7 +8,6 @@ from libcst.codemod.commands.convert_format_to_fstring import ConvertFormatStrin class ConvertFormatStringCommandTest(CodemodTest): - TRANSFORM = ConvertFormatStringCommand def test_noop(self) -> None: diff --git a/libcst/codemod/commands/tests/test_convert_namedtuple_to_dataclass.py b/libcst/codemod/commands/tests/test_convert_namedtuple_to_dataclass.py index 675bf58a..8e0b314d 100644 --- a/libcst/codemod/commands/tests/test_convert_namedtuple_to_dataclass.py +++ b/libcst/codemod/commands/tests/test_convert_namedtuple_to_dataclass.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -10,7 +10,6 @@ from libcst.codemod.commands.convert_namedtuple_to_dataclass import ( class ConvertNamedTupleToDataclassCommandTest(CodemodTest): - TRANSFORM = ConvertNamedTupleToDataclassCommand def test_no_change(self) -> None: diff --git a/libcst/codemod/commands/tests/test_convert_percent_format_to_fstring.py b/libcst/codemod/commands/tests/test_convert_percent_format_to_fstring.py index 2e65eac2..af34d3e1 100644 --- a/libcst/codemod/commands/tests/test_convert_percent_format_to_fstring.py +++ b/libcst/codemod/commands/tests/test_convert_percent_format_to_fstring.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/tests/test_convert_type_comments.py b/libcst/codemod/commands/tests/test_convert_type_comments.py new file mode 100644 index 00000000..2c5917d1 --- /dev/null +++ b/libcst/codemod/commands/tests/test_convert_type_comments.py @@ -0,0 +1,481 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import sys +from typing import Any + +from libcst.codemod import CodemodTest +from libcst.codemod.commands.convert_type_comments import ConvertTypeComments + + +class TestConvertTypeCommentsBase(CodemodTest): + maxDiff = 1500 + TRANSFORM = ConvertTypeComments + + def assertCodemod39Plus(self, before: str, after: str, **kwargs: Any) -> None: + """ + Assert that the codemod works on Python 3.9+, and that we raise + a NotImplementedError on other Python versions. + """ + if (sys.version_info.major, sys.version_info.minor) < (3, 9): + with self.assertRaises(NotImplementedError): + super().assertCodemod(before, after, **kwargs) + else: + super().assertCodemod(before, after, **kwargs) + + +class TestConvertTypeComments_AssignForWith(TestConvertTypeCommentsBase): + def test_preserves_trailing_comment(self) -> None: + before = """ + y = 5 # type: int # foo + """ + after = """ + y: int = 5 # foo + """ + self.assertCodemod39Plus(before, after) + + def test_convert_assignments(self) -> None: + before = """ + y = 5 # type: int + z = ('this', 7) # type: typing.Tuple[str, int] + """ + after = """ + y: int = 5 + z: "typing.Tuple[str, int]" = ('this', 7) + """ + self.assertCodemod39Plus(before, after) + + def test_convert_assignments_in_context(self) -> None: + """ + Also verify that our matching works regardless of spacing + """ + before = """ + def foo(): + z = ('this', 7) # type: typing.Tuple[str, int] + + class C: + attr0 = 10# type: int + def __init__(self): + self.attr1 = True # type: bool + """ + after = """ + def foo(): + z: "typing.Tuple[str, int]" = ('this', 7) + + class C: + attr0: int = 10 + def __init__(self): + self.attr1: bool = True + """ + self.assertCodemod39Plus(before, after) + + def test_multiple_elements_in_assign_lhs(self) -> None: + before = """ + x, y = [], [] # type: List[int], List[str] + z, w = [], [] # type: (List[int], 
List[str]) + + a, b, *c = range(5) # type: float, float, List[float] + + d, (e1, e2) = foo() # type: float, (int, str) + """ + after = """ + x: "List[int]" + y: "List[str]" + x, y = [], [] + z: "List[int]" + w: "List[str]" + z, w = [], [] + + a: float + b: float + c: "List[float]" + a, b, *c = range(5) + + d: float + e1: int + e2: str + d, (e1, e2) = foo() + """ + self.assertCodemod39Plus(before, after) + + def test_multiple_assignments(self) -> None: + before = """ + x = y = z = 15 # type: int + + a, b = c, d = 'this', 'that' # type: (str, str) + """ + after = """ + x: int + y: int + z: int + x = y = z = 15 + + a: str + b: str + c: str + d: str + a, b = c, d = 'this', 'that' + """ + self.assertCodemod39Plus(before, after) + + def test_semicolons_with_assignment(self) -> None: + """ + When we convert an Assign to an AnnAssign, preserve + semicolons. But if we have to add separate type declarations, + expand them. + """ + before = """ + foo(); x = 12 # type: int + + bar(); y, z = baz() # type: int, str + """ + after = """ + foo(); x: int = 12 + + bar() + y: int + z: str + y, z = baz() + """ + self.assertCodemod39Plus(before, after) + + def test_converting_for_statements(self) -> None: + before = """ + # simple binding + for x in foo(): # type: int + pass + + # nested binding + for (a, (b, c)) in bar(): # type: int, (str, float) + pass + """ + after = """ + # simple binding + x: int + for x in foo(): + pass + + # nested binding + a: int + b: str + c: float + for (a, (b, c)) in bar(): + pass + """ + self.assertCodemod39Plus(before, after) + + def test_converting_with_statements(self) -> None: + before = """ + # simple binding + with open('file') as f: # type: File + pass + + # simple binding, with extra items + with foo(), open('file') as f, bar(): # type: File + pass + + # nested binding + with bar() as (a, (b, c)): # type: int, (str, float) + pass + """ + after = """ + # simple binding + f: "File" + with open('file') as f: + pass + + # simple binding, with extra 
items + f: "File" + with foo(), open('file') as f, bar(): + pass + + # nested binding + a: int + b: str + c: float + with bar() as (a, (b, c)): + pass + """ + self.assertCodemod39Plus(before, after) + + def test_no_change_when_type_comment_unused(self) -> None: + before = """ + # type-ignores are not type comments + x = 10 # type: ignore + + # a commented type comment (per PEP 484) is not a type comment + z = 15 # # type: int + + # ignore unparseable type comments + var = "var" # type: this is not a python type! + + # a type comment in an illegal location won't be used + print("hello") # type: None + + # These examples are not PEP 484 compliant, and result in arity errors + a, b = 1, 2 # type: Tuple[int, int] + w = foo() # type: float, str + + # Multiple assigns with mismatched LHS arities always result in arity + # errors, and we only codemod if each target is error-free + v = v0, v1 = (3, 5) # type: int, int + + # Ignore for statements with arity mismatches + for x in []: # type: int, int + pass + + # Ignore with statements with arity mismatches + with open('file') as (f0, f1): # type: File + pass + + # Ignore with statements that have multiple item bindings + with open('file') as f0, open('file') as f1: # type: File + pass + + # In cases where the entire statement cannot successfully be parsed + # with `type_comments=True` because of an invalid type comment, we + # skip it. Here, annotating the inner `pass` is illegal. + for x in []: # type: int + pass # type: None + """ + after = before + self.assertCodemod39Plus(before, after) + + +class TestConvertTypeComments_FunctionDef(TestConvertTypeCommentsBase): + """ + Some notes on our testing strategy: In order to avoid a combinatorial + explosion in test cases, we leverage some knowledge about the + implementation. 
+ + Here are the key ideas that allow us to write fewer cases: + - The logic for generating annotations is the same for all annotations, + and is well-covered by TestConvertTypeComments_AssignForWith, so we + can stick to just simple builtin types. + - The application of types is independent of where they came from. + - Type comment removal is indepenent of type application, other + than in the case where we give up entirely. + - The rules for which type gets used (existing annotation, inline comment, + or func type comment) is independent of the location of a parameter. + """ + + def test_simple_function_type_comments(self) -> None: + before = """ + def f0(x): # type: (...) -> None + pass + + def f1(x): # type: (int) -> None + pass + + def f2(x, /, y = 'y', *, z = 1.5): + # type: (int, str, float) -> None + pass + + def f3(x, *args, y, **kwargs): + # type: (str, int, str, float) -> None + pass + + def f4(x, *args, **kwargs): + # type: (str, *int, **float) -> None + pass + """ + after = """ + def f0(x) -> None: + pass + + def f1(x: int) -> None: + pass + + def f2(x: int, /, y: str = 'y', *, z: float = 1.5) -> None: + pass + + def f3(x: str, *args: int, y: str, **kwargs: float) -> None: + pass + + def f4(x: str, *args: int, **kwargs: float) -> None: + pass + """ + self.assertCodemod39Plus(before, after) + + def test_prioritization_order_for_type_application(self) -> None: + before = """ + def f( + x: int, # type: str + y, # type: str + z + ): # type: (float, float, float) -> None + pass + """ + after = """ + def f( + x: int, + y: str, + z: float + ) -> None: + pass + """ + self.assertCodemod39Plus(before, after) + + def test_inlined_function_type_comments(self) -> None: + before = """ + def f( + x, # not-a-type-comment + # also-not-a-type-comment + y = 42, # type: int + *args, + # type: technically-another-line-is-legal :o + z, + **kwargs, # type: str + ): # not-a-type-comment + # also-not-a-type-comment + pass + """ + after = """ + def f( + x, # not-a-type-comment 
+ # also-not-a-type-comment + y: int = 42, + *args: "technically-another-line-is-legal :o", + z, + **kwargs: str, + ): # not-a-type-comment + # also-not-a-type-comment + pass + """ + self.assertCodemod39Plus(before, after) + + def test_method_transforms(self) -> None: + before = """ + class A: + + def __init__(self, thing): # type: (str) -> None + self.thing = thing + + @classmethod + def make(cls): # type: () -> A + return cls("thing") + + @staticmethod + def f(x, y): # type: (object, object) -> None + pass + + def method0( + self, + other_thing, + ): # type: (str) -> bool + return self.thing == other_thing + + def method1( + self, # type: A + other_thing, # type: str + ): # type: (int) -> bool + return self.thing == other_thing + + def method2( + self, + other_thing, + ): # type: (A, str) -> bool + return self.thing == other_thing + """ + after = """ + class A: + + def __init__(self, thing: str) -> None: + self.thing = thing + + @classmethod + def make(cls) -> "A": + return cls("thing") + + @staticmethod + def f(x: object, y: object) -> None: + pass + + def method0( + self, + other_thing: str, + ) -> bool: + return self.thing == other_thing + + def method1( + self: "A", + other_thing: str, + ) -> bool: + return self.thing == other_thing + + def method2( + self: "A", + other_thing: str, + ) -> bool: + return self.thing == other_thing + """ + self.assertCodemod39Plus(before, after) + + def test_no_change_if_function_type_comments_unused(self) -> None: + before = """ + # arity error in arguments + def f(x, y): # type: (int) -> float + pass + + # unparseable function type + def f(x, y): # type: this is not a type! + pass + + # In cases where the entire statement cannot successfully be parsed + # with `type_comments=True` because of an invalid type comment, we + # skip it. Here, annotating the inner `pass` is illegal. 
+ def f(x, y): # type: (int, int) -> None + pass # type: None + """ + after = before + self.assertCodemod39Plus(before, after) + + def test_do_not_traverse_lambda_Param(self) -> None: + """ + The Param node can happen not just in FunctionDef but also in + Lambda. Make sure this doesn't cause problems. + """ + before = """ + @dataclass + class WrapsAFunction: + func: Callable + msg_gen: Callable = lambda self: f"calling {self.func.__name__}..." + """ + after = before + self.assertCodemod39Plus(before, after) + + def test_no_quoting(self) -> None: + before = """ + def f(x): + # type: (Foo) -> Foo + pass + w = x # type: Foo + y, z = x, x # type: (Foo, Foo) + return w + + with get_context() as context: # type: Context + pass + + for loop_var in the_iterable: # type: LoopType + pass + """ + after = """ + def f(x: Foo) -> Foo: + pass + w: Foo = x + y: Foo + z: Foo + y, z = x, x + return w + + context: Context + with get_context() as context: + pass + + loop_var: LoopType + for loop_var in the_iterable: + pass + """ + self.assertCodemod39Plus(before, after, no_quote_annotations=True) diff --git a/libcst/codemod/commands/tests/test_convert_union_to_or.py b/libcst/codemod/commands/tests/test_convert_union_to_or.py new file mode 100644 index 00000000..5ba557d2 --- /dev/null +++ b/libcst/codemod/commands/tests/test_convert_union_to_or.py @@ -0,0 +1,86 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# +# pyre-strict + +from libcst.codemod import CodemodTest +from libcst.codemod.commands.convert_union_to_or import ConvertUnionToOrCommand + + +class TestConvertUnionToOrCommand(CodemodTest): + TRANSFORM = ConvertUnionToOrCommand + + def test_simple_union(self) -> None: + before = """ + from typing import Union + x: Union[int, str] + """ + after = """ + x: int | str + """ + self.assertCodemod(before, after) + + def test_nested_union(self) -> None: + before = """ + from typing import Union + x: Union[int, Union[str, float]] + """ + after = """ + x: int | str | float + """ + self.assertCodemod(before, after) + + def test_single_type_union(self) -> None: + before = """ + from typing import Union + x: Union[int] + """ + after = """ + x: int + """ + self.assertCodemod(before, after) + + def test_union_with_alias(self) -> None: + before = """ + import typing as t + x: t.Union[int, str] + """ + after = """ + import typing as t + x: int | str + """ + self.assertCodemod(before, after) + + def test_union_with_unused_import(self) -> None: + before = """ + from typing import Union, List + x: Union[int, str] + """ + after = """ + from typing import List + x: int | str + """ + self.assertCodemod(before, after) + + def test_union_no_import(self) -> None: + before = """ + x: Union[int, str] + """ + after = """ + x: Union[int, str] + """ + self.assertCodemod(before, after) + + def test_union_in_function(self) -> None: + before = """ + from typing import Union + def foo(x: Union[int, str]) -> Union[float, None]: + ... + """ + after = """ + def foo(x: int | str) -> float | None: + ... + """ + self.assertCodemod(before, after) diff --git a/libcst/codemod/commands/tests/test_ensure_import_present.py b/libcst/codemod/commands/tests/test_ensure_import_present.py index b389c3bf..f9329957 100644 --- a/libcst/codemod/commands/tests/test_ensure_import_present.py +++ b/libcst/codemod/commands/tests/test_ensure_import_present.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. 
and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/tests/test_fix_pyre_directives.py b/libcst/codemod/commands/tests/test_fix_pyre_directives.py index dea57825..4707073a 100644 --- a/libcst/codemod/commands/tests/test_fix_pyre_directives.py +++ b/libcst/codemod/commands/tests/test_fix_pyre_directives.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,21 +8,15 @@ from libcst.codemod.commands.fix_pyre_directives import FixPyreDirectivesCommand class TestFixPyreDirectivesCommand(CodemodTest): - TRANSFORM = FixPyreDirectivesCommand def test_no_need_to_fix_simple(self) -> None: """ Tests that a pyre-strict inside the module header doesn't get touched. """ - before = """ - # pyre-strict - from typing import List - - def baz() -> List[Foo]: - pass - """ - after = """ + after = ( + before + ) = """ # pyre-strict from typing import List @@ -35,16 +29,9 @@ class TestFixPyreDirectivesCommand(CodemodTest): """ Tests that a pyre-strict inside the module header doesn't get touched. """ - before = """ - # This is some header comment. - # - # pyre-strict - from typing import List - - def baz() -> List[Foo]: - pass - """ - after = """ + after = ( + before + ) = """ # This is some header comment. # # pyre-strict @@ -59,17 +46,9 @@ class TestFixPyreDirectivesCommand(CodemodTest): """ Tests that a pyre-strict inside the module header doesn't get touched. """ - before = """ - # pyre-strict - # - # This is some header comment. - - from typing import List - - def baz() -> List[Foo]: - pass - """ - after = """ + after = ( + before + ) = """ # pyre-strict # # This is some header comment. 
diff --git a/libcst/codemod/commands/tests/test_fix_variadic_callable.py b/libcst/codemod/commands/tests/test_fix_variadic_callable.py new file mode 100644 index 00000000..848f0c98 --- /dev/null +++ b/libcst/codemod/commands/tests/test_fix_variadic_callable.py @@ -0,0 +1,92 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# pyre-strict + +from libcst.codemod import CodemodTest +from libcst.codemod.commands.fix_variadic_callable import FixVariadicCallableCommmand + + +class TestFixVariadicCallableCommmand(CodemodTest): + TRANSFORM = FixVariadicCallableCommmand + + def test_callable_typing(self) -> None: + before = """ + from typing import Callable + x: Callable[[...], int] = ... + """ + after = """ + from typing import Callable + x: Callable[..., int] = ... + """ + self.assertCodemod(before, after) + + def test_callable_typing_alias(self) -> None: + before = """ + import typing as t + x: t.Callable[[...], int] = ... + """ + after = """ + import typing as t + x: t.Callable[..., int] = ... + """ + self.assertCodemod(before, after) + + def test_callable_import_alias(self) -> None: + before = """ + from typing import Callable as C + x: C[[...], int] = ... + """ + after = """ + from typing import Callable as C + x: C[..., int] = ... + """ + self.assertCodemod(before, after) + + def test_callable_with_optional(self) -> None: + before = """ + from typing import Callable + def foo(bar: Optional[Callable[[...], int]]) -> Callable[[...], int]: + ... + """ + after = """ + from typing import Callable + def foo(bar: Optional[Callable[..., int]]) -> Callable[..., int]: + ... 
+ """ + self.assertCodemod(before, after) + + def test_callable_with_arguments(self) -> None: + before = """ + from typing import Callable + x: Callable[[int], int] + """ + after = """ + from typing import Callable + x: Callable[[int], int] + """ + self.assertCodemod(before, after) + + def test_callable_with_variadic_arguments(self) -> None: + before = """ + from typing import Callable + x: Callable[[int, int, ...], int] + """ + after = """ + from typing import Callable + x: Callable[[int, int, ...], int] + """ + self.assertCodemod(before, after) + + def test_callable_no_arguments(self) -> None: + before = """ + from typing import Callable + x: Callable + """ + after = """ + from typing import Callable + x: Callable + """ + self.assertCodemod(before, after) diff --git a/libcst/codemod/commands/tests/test_noop.py b/libcst/codemod/commands/tests/test_noop.py index 827f3a3c..fa586a3d 100644 --- a/libcst/codemod/commands/tests/test_noop.py +++ b/libcst/codemod/commands/tests/test_noop.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,6 @@ from libcst.codemod.commands.noop import NOOPCommand class TestNOOPCodemod(CodemodTest): - TRANSFORM = NOOPCommand def test_noop(self) -> None: diff --git a/libcst/codemod/commands/tests/test_remove_pyre_directive.py b/libcst/codemod/commands/tests/test_remove_pyre_directive.py index 06ccb8f8..c99f88ce 100644 --- a/libcst/codemod/commands/tests/test_remove_pyre_directive.py +++ b/libcst/codemod/commands/tests/test_remove_pyre_directive.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -11,7 +11,6 @@ from libcst.codemod.commands.remove_pyre_directive import ( class TestRemovePyreStrictCommand(CodemodTest): - TRANSFORM = RemovePyreStrictCommand def test_remove_from_file(self) -> None: @@ -97,7 +96,6 @@ class TestRemovePyreStrictCommand(CodemodTest): class TestRemovePyreUnsafeCommand(CodemodTest): - TRANSFORM = RemovePyreUnsafeCommand def test_remove_from_file(self) -> None: diff --git a/libcst/codemod/commands/tests/test_remove_unused_imports.py b/libcst/codemod/commands/tests/test_remove_unused_imports.py index 23b1c727..dc2b3366 100644 --- a/libcst/codemod/commands/tests/test_remove_unused_imports.py +++ b/libcst/codemod/commands/tests/test_remove_unused_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/tests/test_rename.py b/libcst/codemod/commands/tests/test_rename.py index aa95801e..5b6e0128 100644 --- a/libcst/codemod/commands/tests/test_rename.py +++ b/libcst/codemod/commands/tests/test_rename.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -10,11 +10,9 @@ from libcst.codemod.commands.rename import RenameCommand class TestRenameCommand(CodemodTest): - TRANSFORM = RenameCommand def test_rename_name(self) -> None: - before = """ from foo import bar @@ -30,8 +28,20 @@ class TestRenameCommand(CodemodTest): self.assertCodemod(before, after, old_name="foo.bar", new_name="baz.qux") - def test_rename_name_asname(self) -> None: + def test_rename_to_builtin(self) -> None: + before = """ + from typing import List + x: List[int] = [] + """ + after = """ + x: list[int] = [] + """ + self.assertCodemod( + before, after, old_name="typing.List", new_name="builtins.list" + ) + + def test_rename_name_asname(self) -> None: before = """ from foo import bar as bla @@ -73,7 +83,6 @@ class TestRenameCommand(CodemodTest): ) def test_rename_attr(self) -> None: - before = """ import a.b @@ -95,7 +104,6 @@ class TestRenameCommand(CodemodTest): ) def test_rename_attr_asname(self) -> None: - before = """ import foo as bar @@ -116,6 +124,27 @@ class TestRenameCommand(CodemodTest): new_name="baz.quux", ) + def test_rename_attr_asname_2(self) -> None: + before = """ + import foo.qux as bar + + def test() -> None: + bar.z(5) + """ + after = """ + import baz.quux + + def test() -> None: + baz.quux.z(5) + """ + + self.assertCodemod( + before, + after, + old_name="foo.qux", + new_name="baz.quux", + ) + def test_rename_module_import(self) -> None: before = """ import a.b @@ -280,6 +309,38 @@ class TestRenameCommand(CodemodTest): new_name="a.b.module_3.Class_3", ) + def test_import_same_module(self) -> None: + before = """ + import logging + logging.warn(1) + """ + after = """ + import logging + logging.warning(1) + """ + self.assertCodemod( + before, + after, + old_name="logging.warn", + new_name="logging.warning", + ) + + def test_import_same_dotted_module(self) -> None: + before = """ + import a.b + a.b.warn(1) + """ + after = """ + import a.b + a.b.warning(1) + """ + self.assertCodemod( + before, + after, + old_name="a.b.warn", + 
new_name="a.b.warning", + ) + def test_rename_local_variable(self) -> None: before = """ x = 5 @@ -334,6 +395,28 @@ class TestRenameCommand(CodemodTest): new_name="d.z", ) + def test_comma_import(self) -> None: + before = """ + import a, b, c + + class Foo(a.z): + bar: b.bar + baz: c.baz + """ + after = """ + import a, b, d + + class Foo(a.z): + bar: b.bar + baz: d.baz + """ + self.assertCodemod( + before, + after, + old_name="c.baz", + new_name="d.baz", + ) + def test_other_import_froms_untouched(self) -> None: before = """ from a import b, c, d @@ -357,6 +440,61 @@ class TestRenameCommand(CodemodTest): new_name="f.b", ) + def test_comma_import_from(self) -> None: + before = """ + from a import b, c, d + + class Foo(b): + bar: c.bar + baz: d.baz + """ + after = """ + from a import b, c + from f import d + + class Foo(b): + bar: c.bar + baz: d.baz + """ + self.assertCodemod( + before, + after, + old_name="a.d", + new_name="f.d", + ) + + def test_comma_import_from_parens(self) -> None: + before = """ + from a import ( + b, + c, + d, + ) + from x import (y,) + + class Foo(b): + bar: c.bar + baz: d.baz + """ + after = """ + from a import ( + b, + c, + ) + from x import (y,) + from f import d + + class Foo(b): + bar: c.bar + baz: d.baz + """ + self.assertCodemod( + before, + after, + old_name="a.d", + new_name="f.d", + ) + def test_no_removal_of_import_in_use(self) -> None: before = """ import a @@ -660,3 +798,90 @@ class TestRenameCommand(CodemodTest): bar(42) """ self.assertCodemod(before, before, old_name="baz.bar", new_name="qux.bar") + + def test_rename_single_with_colon(self) -> None: + before = """ + from a.b import qux + + print(qux) + """ + after = """ + from a import b + + print(b.qux) + """ + self.assertCodemod( + before, + after, + old_name="a.b.qux", + new_name="a:b.qux", + ) + + def test_import_parent_module(self) -> None: + before = """ + import a + a.b.c(a.b.c.d) + """ + after = """ + from z import c + + c(c.d) + """ + self.assertCodemod(before, after, 
old_name="a.b.c", new_name="z.c") + + def test_import_parent_module_2(self) -> None: + before = """ + import a.b + a.b.c.d(a.b.c.d.x) + """ + after = """ + from z import c + + c(c.x) + """ + self.assertCodemod(before, after, old_name="a.b.c.d", new_name="z.c") + + def test_import_parent_module_3(self) -> None: + before = """ + import a + a.b.c(a.b.c.d) + """ + after = """ + import z.c + + z.c(z.c.d) + """ + self.assertCodemod(before, after, old_name="a.b.c", new_name="z.c:") + + def test_import_parent_module_asname(self) -> None: + before = """ + import a.b as alias + alias.c(alias.c.d) + """ + after = """ + import z + z.c(z.c.d) + """ + self.assertCodemod(before, after, old_name="a.b.c", new_name="z.c") + + def test_push_down_toplevel_names(self) -> None: + before = """ + import foo + foo.baz() + """ + after = """ + import quux.foo + quux.foo.baz() + """ + self.assertCodemod(before, after, old_name="foo", new_name="quux.foo") + + def test_push_down_toplevel_names_with_asname(self) -> None: + before = """ + import foo as bar + bar.baz() + """ + after = """ + import quux.foo + quux.foo.baz() + """ + self.assertCodemod(before, after, old_name="foo", new_name="quux.foo") diff --git a/libcst/codemod/commands/tests/test_rename_typing_generic_aliases.py b/libcst/codemod/commands/tests/test_rename_typing_generic_aliases.py new file mode 100644 index 00000000..7a0a83c3 --- /dev/null +++ b/libcst/codemod/commands/tests/test_rename_typing_generic_aliases.py @@ -0,0 +1,33 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# +# pyre-strict + +from libcst.codemod import CodemodTest +from libcst.codemod.commands.rename_typing_generic_aliases import ( + RenameTypingGenericAliases, +) + + +class TestRenameCommand(CodemodTest): + TRANSFORM = RenameTypingGenericAliases + + def test_rename_typing_generic_alias(self) -> None: + before = """ + from typing import List, Set, Dict, FrozenSet, Tuple + x: List[int] = [] + y: Set[int] = set() + z: Dict[str, int] = {} + a: FrozenSet[str] = frozenset() + b: Tuple[int, str] = (1, "hello") + """ + after = """ + x: list[int] = [] + y: set[int] = set() + z: dict[str, int] = {} + a: frozenset[str] = frozenset() + b: tuple[int, str] = (1, "hello") + """ + self.assertCodemod(before, after) diff --git a/libcst/codemod/commands/tests/test_strip_strings_from_types.py b/libcst/codemod/commands/tests/test_strip_strings_from_types.py index 3ec292b9..3c3893b1 100644 --- a/libcst/codemod/commands/tests/test_strip_strings_from_types.py +++ b/libcst/codemod/commands/tests/test_strip_strings_from_types.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,6 @@ from libcst.codemod.commands.strip_strings_from_types import StripStringsCommand class TestStripStringsCodemod(CodemodTest): - TRANSFORM = StripStringsCommand def test_noop(self) -> None: diff --git a/libcst/codemod/commands/tests/test_unnecessary_format_string.py b/libcst/codemod/commands/tests/test_unnecessary_format_string.py index ebf1977a..e980bd38 100644 --- a/libcst/codemod/commands/tests/test_unnecessary_format_string.py +++ b/libcst/codemod/commands/tests/test_unnecessary_format_string.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/commands/unnecessary_format_string.py b/libcst/codemod/commands/unnecessary_format_string.py index a539d213..2320af17 100644 --- a/libcst/codemod/commands/unnecessary_format_string.py +++ b/libcst/codemod/commands/unnecessary_format_string.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,7 +9,6 @@ from libcst.codemod import VisitorBasedCodemodCommand class UnnecessaryFormatString(VisitorBasedCodemodCommand): - DESCRIPTION: str = ( "Converts f-strings which perform no formatting to regular strings." ) diff --git a/libcst/codemod/tests/__init__.py b/libcst/codemod/tests/__init__.py index 602d2685..aac70d45 100644 --- a/libcst/codemod/tests/__init__.py +++ b/libcst/codemod/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/tests/codemod_formatter_error_input.py.txt b/libcst/codemod/tests/codemod_formatter_error_input.py.txt index c83c175a..02cef44e 100644 --- a/libcst/codemod/tests/codemod_formatter_error_input.py.txt +++ b/libcst/codemod/tests/codemod_formatter_error_input.py.txt @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/codemod/tests/test_cli.py b/libcst/codemod/tests/test_cli.py deleted file mode 100644 index a4d1404f..00000000 --- a/libcst/codemod/tests/test_cli.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# -from typing import Optional - -from libcst.codemod._cli import _calculate_module -from libcst.testing.utils import UnitTest, data_provider - - -class TestPackageCalculation(UnitTest): - @data_provider( - ( - # Providing no root should give back no module. - (None, "/some/dummy/file.py", None), - # Providing a file outside the root should give back no module. - ("/home/username/root", "/some/dummy/file.py", None), - ("/home/username/root/", "/some/dummy/file.py", None), - ("/home/username/root", "/home/username/file.py", None), - # Various files inside the root should give back valid modules. - ("/home/username/root", "/home/username/root/file.py", "file"), - ("/home/username/root/", "/home/username/root/file.py", "file"), - ( - "/home/username/root/", - "/home/username/root/some/dir/file.py", - "some.dir.file", - ), - # Various special files inside the root should give back valid modules. - ( - "/home/username/root/", - "/home/username/root/some/dir/__init__.py", - "some.dir", - ), - ( - "/home/username/root/", - "/home/username/root/some/dir/__main__.py", - "some.dir", - ), - ), - ) - def test_calculate_module( - self, repo_root: Optional[str], filename: str, module: str - ) -> None: - self.assertEqual(_calculate_module(repo_root, filename), module) diff --git a/libcst/codemod/tests/test_codemod.py b/libcst/codemod/tests/test_codemod.py index 4fb5dbaa..ba68b94b 100644 --- a/libcst/codemod/tests/test_codemod.py +++ b/libcst/codemod/tests/test_codemod.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -23,7 +23,6 @@ class SimpleCodemod(Codemod): class TestSkipDetection(CodemodTest): - TRANSFORM = SimpleCodemod def test_detect_skip(self) -> None: @@ -87,7 +86,6 @@ class IncrementCodemod(Codemod): class TestMultipass(CodemodTest): - TRANSFORM = IncrementCodemod def test_multi_iterations(self) -> None: diff --git a/libcst/codemod/tests/test_codemod_cli.py b/libcst/codemod/tests/test_codemod_cli.py index 74f1c174..9798b071 100644 --- a/libcst/codemod/tests/test_codemod_cli.py +++ b/libcst/codemod/tests/test_codemod_cli.py @@ -1,38 +1,119 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # +import platform import subprocess import sys +import tempfile +from pathlib import Path +from unittest import skipIf +from libcst.codemod import CodemodTest from libcst.testing.utils import UnitTest class TestCodemodCLI(UnitTest): + # pyre-ignore - no idea why pyre is complaining about this + @skipIf(platform.system() == "Windows", "Windows") def test_codemod_formatter_error_input(self) -> None: rlt = subprocess.run( [ - "python", + sys.executable, "-m", "libcst.tool", "codemod", "remove_unused_imports.RemoveUnusedImportsCommand", - "libcst/codemod/tests/codemod_formatter_error_input.py.txt", + # `ArgumentParser.parse_known_args()`'s behavior dictates that options + # need to go after instead of before the codemod command identifier. 
+ "--python-version", + "3.6", + str(Path(__file__).parent / "codemod_formatter_error_input.py.txt"), ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) - version = sys.version_info - if version[0] == 3 and version[1] == 6: - self.assertIn( - "ParserSyntaxError: Syntax Error @ 14:11.", - rlt.stderr.decode("utf-8"), + self.assertIn( + "error: cannot format -: Cannot parse for target version Python 3.6: 13:10: async with AsyncExitStack() as stack:", + rlt.stderr.decode("utf-8"), + ) + + def test_codemod_external(self) -> None: + # Test running the NOOP command as an "external command" + # against this very file. + output = subprocess.check_output( + [ + sys.executable, + "-m", + "libcst.tool", + "codemod", + "-x", # external module + "libcst.codemod.commands.noop.NOOPCommand", + str(Path(__file__)), + ], + encoding="utf-8", + stderr=subprocess.STDOUT, + ) + assert "Finished codemodding 1 files!" in output + + def test_warning_messages_several_files(self) -> None: + code = """ + def baz() -> str: + return "{}: {}".format(*baz) + """ + with tempfile.TemporaryDirectory() as tmpdir: + p = Path(tmpdir) + (p / "mod1.py").write_text(CodemodTest.make_fixture_data(code)) + (p / "mod2.py").write_text(CodemodTest.make_fixture_data(code)) + (p / "mod3.py").write_text(CodemodTest.make_fixture_data(code)) + output = subprocess.run( + [ + sys.executable, + "-m", + "libcst.tool", + "codemod", + "convert_format_to_fstring.ConvertFormatStringCommand", + str(p), + ], + encoding="utf-8", + stderr=subprocess.PIPE, ) - else: + # Each module will generate a warning, so we should get 3 warnings in total self.assertIn( - "error: cannot format -: Cannot parse: 13:10: async with AsyncExitStack() as stack:", - rlt.stderr.decode("utf-8"), + "- 3 warnings were generated.", + output.stderr, + ) + + def test_matcher_decorators_multiprocessing(self) -> None: + file_count = 5 + code = """ + def baz(): # type: int + return 5 + """ + with tempfile.TemporaryDirectory() as tmpdir: + p = Path(tmpdir) + 
# Using more than chunksize=4 files to trigger multiprocessing + for i in range(file_count): + (p / f"mod{i}.py").write_text(CodemodTest.make_fixture_data(code)) + output = subprocess.run( + [ + sys.executable, + "-m", + "libcst.tool", + "codemod", + # Good candidate since it uses matcher decorators + "convert_type_comments.ConvertTypeComments", + str(p), + "--jobs", + str(file_count), + ], + encoding="utf-8", + stderr=subprocess.PIPE, + ) + self.assertIn( + f"Transformed {file_count} files successfully.", + output.stderr, ) diff --git a/libcst/codemod/tests/test_command_helpers.py b/libcst/codemod/tests/test_command_helpers.py new file mode 100644 index 00000000..15c461b6 --- /dev/null +++ b/libcst/codemod/tests/test_command_helpers.py @@ -0,0 +1,325 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +from typing import Union + +import libcst as cst +from libcst.codemod import CodemodTest, VisitorBasedCodemodCommand + + +class TestRemoveUnusedImportHelper(CodemodTest): + """Tests for the remove_unused_import helper method in CodemodCommand.""" + + def test_remove_unused_import_simple(self) -> None: + """ + Test that remove_unused_import helper method works correctly. + """ + + class RemoveBarImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule removal + self.remove_unused_import("bar") + + before = """ + import bar + import baz + + def foo() -> None: + pass + """ + after = """ + import baz + + def foo() -> None: + pass + """ + + self.TRANSFORM = RemoveBarImport + self.assertCodemod(before, after) + + def test_remove_unused_import_from_simple(self) -> None: + """ + Test that remove_unused_import helper method works correctly with from imports. 
+ """ + + class RemoveBarFromImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule removal + self.remove_unused_import("a.b.c", "bar") + + before = """ + from a.b.c import bar, baz + + def foo() -> None: + baz() + """ + after = """ + from a.b.c import baz + + def foo() -> None: + baz() + """ + + self.TRANSFORM = RemoveBarFromImport + self.assertCodemod(before, after) + + def test_remove_unused_import_with_alias(self) -> None: + """ + Test that remove_unused_import helper method works correctly with aliased imports. + """ + + class RemoveBarAsQuxImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule removal + self.remove_unused_import("a.b.c", "bar", "qux") + + before = """ + from a.b.c import bar as qux, baz + + def foo() -> None: + baz() + """ + after = """ + from a.b.c import baz + + def foo() -> None: + baz() + """ + + self.TRANSFORM = RemoveBarAsQuxImport + self.assertCodemod(before, after) + + +class TestRemoveUnusedImportByNodeHelper(CodemodTest): + """Tests for the remove_unused_import_by_node helper method in CodemodCommand.""" + + def test_remove_unused_import_by_node_simple(self) -> None: + """ + Test that remove_unused_import_by_node helper method works correctly. 
+ """ + + class RemoveBarCallAndImport(VisitorBasedCodemodCommand): + METADATA_DEPENDENCIES = ( + cst.metadata.QualifiedNameProvider, + cst.metadata.ScopeProvider, + ) + + def leave_SimpleStatementLine( + self, + original_node: cst.SimpleStatementLine, + updated_node: cst.SimpleStatementLine, + ) -> Union[cst.RemovalSentinel, cst.SimpleStatementLine]: + # Remove any statement that calls bar() + if cst.matchers.matches( + updated_node, + cst.matchers.SimpleStatementLine( + body=[cst.matchers.Expr(cst.matchers.Call())] + ), + ): + call = cst.ensure_type(updated_node.body[0], cst.Expr).value + if cst.matchers.matches( + call, cst.matchers.Call(func=cst.matchers.Name("bar")) + ): + # Use the helper method to remove imports referenced by this node + self.remove_unused_import_by_node(original_node) + return cst.RemoveFromParent() + return updated_node + + before = """ + from foo import bar, baz + + def fun() -> None: + bar() + baz() + """ + after = """ + from foo import baz + + def fun() -> None: + baz() + """ + + self.TRANSFORM = RemoveBarCallAndImport + self.assertCodemod(before, after) + + +class TestAddNeededImportHelper(CodemodTest): + """Tests for the add_needed_import helper method in CodemodCommand.""" + + def test_add_needed_import_simple(self) -> None: + """ + Test that add_needed_import helper method works correctly. + """ + + class AddBarImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule import addition + self.add_needed_import("bar") + + before = """ + def foo() -> None: + pass + """ + after = """ + import bar + + def foo() -> None: + pass + """ + + self.TRANSFORM = AddBarImport + self.assertCodemod(before, after) + + def test_add_needed_import_from_simple(self) -> None: + """ + Test that add_needed_import helper method works correctly with from imports. 
+ """ + + class AddBarFromImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule import addition + self.add_needed_import("a.b.c", "bar") + + before = """ + def foo() -> None: + pass + """ + after = """ + from a.b.c import bar + + def foo() -> None: + pass + """ + + self.TRANSFORM = AddBarFromImport + self.assertCodemod(before, after) + + def test_add_needed_import_with_alias(self) -> None: + """ + Test that add_needed_import helper method works correctly with aliased imports. + """ + + class AddBarAsQuxImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule import addition + self.add_needed_import("a.b.c", "bar", "qux") + + before = """ + def foo() -> None: + pass + """ + after = """ + from a.b.c import bar as qux + + def foo() -> None: + pass + """ + + self.TRANSFORM = AddBarAsQuxImport + self.assertCodemod(before, after) + + def test_add_needed_import_relative(self) -> None: + """ + Test that add_needed_import helper method works correctly with relative imports. + """ + + class AddRelativeImport(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Use the helper method to schedule relative import addition + self.add_needed_import("c", "bar", relative=2) + + before = """ + def foo() -> None: + pass + """ + after = """ + from ..c import bar + + def foo() -> None: + pass + """ + + self.TRANSFORM = AddRelativeImport + self.assertCodemod(before, after) + + +class TestCombinedHelpers(CodemodTest): + """Tests for combining add_needed_import and remove_unused_import helper methods.""" + + def test_add_and_remove_imports(self) -> None: + """ + Test that both helper methods work correctly when used together. 
+ """ + + class ReplaceBarWithBaz(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Add new import and remove old one + self.add_needed_import("new_module", "baz") + self.remove_unused_import("old_module", "bar") + + before = """ + from other_module import qux + from old_module import bar + + def foo() -> None: + pass + """ + after = """ + from other_module import qux + from new_module import baz + + def foo() -> None: + pass + """ + + self.TRANSFORM = ReplaceBarWithBaz + self.assertCodemod(before, after) + + def test_add_and_remove_same_import(self) -> None: + """ + Test that both helper methods work correctly when used together. + """ + + class AddAndRemoveBar(VisitorBasedCodemodCommand): + def visit_Module(self, node: cst.Module) -> None: + # Add new import and remove old one + self.add_needed_import("hello_module", "bar") + self.remove_unused_import("hello_module", "bar") + + self.TRANSFORM = AddAndRemoveBar + + before = """ + from other_module import baz + + def foo() -> None: + pass + """ + # Should remain unchanged + self.assertCodemod(before, before) + + before = """ + from other_module import baz + from hello_module import bar + + def foo() -> None: + bar.func() + """ + self.assertCodemod(before, before) + + before = """ + from other_module import baz + from hello_module import bar + + def foo() -> None: + pass + """ + + after = """ + from other_module import baz + + def foo() -> None: + pass + """ + self.assertCodemod(before, after) diff --git a/libcst/codemod/tests/test_metadata.py b/libcst/codemod/tests/test_metadata.py index acd25bf4..eff3a228 100644 --- a/libcst/codemod/tests/test_metadata.py +++ b/libcst/codemod/tests/test_metadata.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -13,7 +13,6 @@ from libcst.testing.utils import UnitTest class TestingCollector(ContextAwareVisitor): - METADATA_DEPENDENCIES = (PositionProvider,) def visit_Pass(self, node: cst.Pass) -> None: @@ -22,7 +21,6 @@ class TestingCollector(ContextAwareVisitor): class TestingTransform(ContextAwareTransformer): - METADATA_DEPENDENCIES = (PositionProvider,) def visit_FunctionDef(self, node: cst.FunctionDef) -> None: diff --git a/libcst/codemod/tests/test_runner.py b/libcst/codemod/tests/test_runner.py index 07673055..2a714e07 100644 --- a/libcst/codemod/tests/test_runner.py +++ b/libcst/codemod/tests/test_runner.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -12,11 +12,11 @@ from libcst.codemod import ( CodemodContext, CodemodTest, SkipFile, + transform_module, TransformExit, TransformFailure, TransformSkip, TransformSuccess, - transform_module, ) diff --git a/libcst/codemod/visitors/__init__.py b/libcst/codemod/visitors/__init__.py index bcc570be..632d6fa6 100644 --- a/libcst/codemod/visitors/__init__.py +++ b/libcst/codemod/visitors/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -7,21 +7,24 @@ from libcst.codemod.visitors._add_imports import AddImportsVisitor from libcst.codemod.visitors._apply_type_annotations import ApplyTypeAnnotationsVisitor from libcst.codemod.visitors._gather_comments import GatherCommentsVisitor from libcst.codemod.visitors._gather_exports import GatherExportsVisitor +from libcst.codemod.visitors._gather_global_names import GatherGlobalNamesVisitor from libcst.codemod.visitors._gather_imports import GatherImportsVisitor from libcst.codemod.visitors._gather_string_annotation_names import ( GatherNamesFromStringAnnotationsVisitor, ) from libcst.codemod.visitors._gather_unused_imports import GatherUnusedImportsVisitor +from libcst.codemod.visitors._imports import ImportItem from libcst.codemod.visitors._remove_imports import RemoveImportsVisitor - __all__ = [ "AddImportsVisitor", "ApplyTypeAnnotationsVisitor", "GatherCommentsVisitor", "GatherExportsVisitor", + "GatherGlobalNamesVisitor", "GatherImportsVisitor", "GatherNamesFromStringAnnotationsVisitor", "GatherUnusedImportsVisitor", + "ImportItem", "RemoveImportsVisitor", ] diff --git a/libcst/codemod/visitors/_add_imports.py b/libcst/codemod/visitors/_add_imports.py index ccafc9c2..eeab43ae 100644 --- a/libcst/codemod/visitors/_add_imports.py +++ b/libcst/codemod/visitors/_add_imports.py @@ -1,17 +1,58 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-# + from collections import defaultdict from typing import Dict, List, Optional, Sequence, Set, Tuple, Union import libcst -from libcst import matchers as m, parse_statement +from libcst import CSTLogicError, matchers as m, parse_statement +from libcst._nodes.statement import Import, ImportFrom, SimpleStatementLine from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer -from libcst.codemod.visitors._gather_imports import GatherImportsVisitor -from libcst.helpers import get_absolute_module_for_import +from libcst.codemod.visitors._gather_imports import _GatherImportsMixin +from libcst.codemod.visitors._imports import ImportItem +from libcst.helpers import get_absolute_module_from_package_for_import +from libcst.helpers.common import ensure_type + + +class _GatherTopImportsBeforeStatements(_GatherImportsMixin): + """ + Works similarly to GatherImportsVisitor, but only considers imports + declared before any other statements of the module with the exception + of docstrings and __strict__ flag. 
+ """ + + def __init__(self, context: CodemodContext) -> None: + super().__init__(context) + # Track all of the imports found in this transform + self.all_imports: List[Union[libcst.Import, libcst.ImportFrom]] = [] + + def leave_Module(self, original_node: libcst.Module) -> None: + start = 1 if _skip_first(original_node) else 0 + for stmt in original_node.body[start:]: + if m.matches( + stmt, + m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()]), + ): + stmt = ensure_type(stmt, SimpleStatementLine) + # Workaround for python 3.8 and 3.9, won't accept Union for isinstance + if m.matches(stmt.body[0], m.ImportFrom()): + imp = ensure_type(stmt.body[0], ImportFrom) + self.all_imports.append(imp) + if m.matches(stmt.body[0], m.Import()): + imp = ensure_type(stmt.body[0], Import) + self.all_imports.append(imp) + else: + break + for imp in self.all_imports: + if m.matches(imp, m.Import()): + imp = ensure_type(imp, Import) + self._handle_Import(imp) + else: + imp = ensure_type(imp, ImportFrom) + self._handle_ImportFrom(imp) class AddImportsVisitor(ContextAwareTransformer): @@ -63,10 +104,10 @@ class AddImportsVisitor(ContextAwareTransformer): @staticmethod def _get_imports_from_context( context: CodemodContext, - ) -> List[Tuple[str, Optional[str], Optional[str]]]: + ) -> List[ImportItem]: imports = context.scratch.get(AddImportsVisitor.CONTEXT_KEY, []) if not isinstance(imports, list): - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") return imports @staticmethod @@ -75,6 +116,7 @@ class AddImportsVisitor(ContextAwareTransformer): module: str, obj: Optional[str] = None, asname: Optional[str] = None, + relative: int = 0, ) -> None: """ Schedule an import to be added in a future invocation of this class by @@ -94,80 +136,85 @@ class AddImportsVisitor(ContextAwareTransformer): """ if module == "__future__" and obj is None: - raise Exception("Cannot import __future__ directly!") + raise ValueError("Cannot import __future__ directly!") imports = 
AddImportsVisitor._get_imports_from_context(context) - imports.append((module, obj, asname)) + imports.append(ImportItem(module, obj, asname, relative)) context.scratch[AddImportsVisitor.CONTEXT_KEY] = imports def __init__( self, context: CodemodContext, - imports: Sequence[Tuple[str, Optional[str], Optional[str]]] = (), + imports: Sequence[ImportItem] = (), ) -> None: # Allow for instantiation from either a context (used when multiple transforms # get chained) or from a direct instantiation. super().__init__(context) - imports: List[Tuple[str, Optional[str], Optional[str]]] = [ + imps: List[ImportItem] = [ *AddImportsVisitor._get_imports_from_context(context), *imports, ] # Verify that the imports are valid - for module, obj, alias in imports: - if module == "__future__" and obj is None: - raise Exception("Cannot import __future__ directly!") - if module == "__future__" and alias is not None: - raise Exception("Cannot import __future__ objects with aliases!") + for imp in imps: + if imp.module == "__future__" and imp.obj_name is None: + raise ValueError("Cannot import __future__ directly!") + if imp.module == "__future__" and imp.alias is not None: + raise ValueError("Cannot import __future__ objects with aliases!") + + # Resolve relative imports if we have a module name + imps = [imp.resolve_relative(self.context.full_package_name) for imp in imps] # List of modules we need to ensure are imported self.module_imports: Set[str] = { - module for (module, obj, alias) in imports if obj is None and alias is None + imp.module for imp in imps if imp.obj_name is None and imp.alias is None } # List of modules we need to check for object imports on from_imports: Set[str] = { - module - for (module, obj, alias) in imports - if obj is not None and alias is None + imp.module for imp in imps if imp.obj_name is not None and imp.alias is None } # Mapping of modules we're adding to the object they should import self.module_mapping: Dict[str, Set[str]] = { module: { - o - for (m, 
o, n) in imports - if m == module and o is not None and n is None + imp.obj_name + for imp in imps + if imp.module == module + and imp.obj_name is not None + and imp.alias is None } for module in sorted(from_imports) } # List of aliased modules we need to ensure are imported self.module_aliases: Dict[str, str] = { - module: alias - for (module, obj, alias) in imports - if obj is None and alias is not None + imp.module: imp.alias + for imp in imps + if imp.obj_name is None and imp.alias is not None } # List of modules we need to check for object imports on from_imports_aliases: Set[str] = { - module - for (module, obj, alias) in imports - if obj is not None and alias is not None + imp.module + for imp in imps + if imp.obj_name is not None and imp.alias is not None } # Mapping of modules we're adding to the object with alias they should import self.alias_mapping: Dict[str, List[Tuple[str, str]]] = { module: [ - (o, n) - for (m, o, n) in imports - if m == module and o is not None and n is not None + (imp.obj_name, imp.alias) + for imp in imps + if imp.module == module + and imp.obj_name is not None + and imp.alias is not None ] for module in sorted(from_imports_aliases) } - # Track the list of imports found in the file + # Track the list of imports found at the top of the file self.all_imports: List[Union[libcst.Import, libcst.ImportFrom]] = [] def visit_Module(self, node: libcst.Module) -> None: - # Do a preliminary pass to gather the imports we already have - gatherer = GatherImportsVisitor(self.context) + # Do a preliminary pass to gather the imports we already have at the top + gatherer = _GatherTopImportsBeforeStatements(self.context) node.visit(gatherer) self.all_imports = gatherer.all_imports @@ -176,7 +223,7 @@ class AddImportsVisitor(ContextAwareTransformer): if module in self.module_aliases and self.module_aliases[module] == alias: del self.module_aliases[module] for module, aliases in gatherer.alias_mapping.items(): - for (obj, alias) in aliases: + for obj, 
alias in aliases: if ( module in self.alias_mapping and (obj, alias) in self.alias_mapping[module] @@ -206,9 +253,13 @@ class AddImportsVisitor(ContextAwareTransformer): # There's nothing to do here! return updated_node + # Ensure this is one of the imports at the top + if original_node not in self.all_imports: + return updated_node + # Get the module we're importing as a string, see if we have work to do. - module = get_absolute_module_for_import( - self.context.full_module_name, updated_node + module = get_absolute_module_from_package_for_import( + self.context.full_package_name, updated_node ) if ( module is None @@ -253,39 +304,26 @@ class AddImportsVisitor(ContextAwareTransformer): statement_before_import_location = 0 import_add_location = 0 - # never insert an import before initial __strict__ flag - if m.matches( - orig_module, - m.Module( - body=[ - m.SimpleStatementLine( - body=[ - m.Assign( - targets=[m.AssignTarget(target=m.Name("__strict__"))] - ) - ] - ), - m.ZeroOrMore(), - ] - ), - ): - statement_before_import_location = import_add_location = 1 - # This works under the principle that while we might modify node contents, # we have yet to modify the number of statements. So we can match on the # original tree but break up the statements of the modified tree. If we # change this assumption in this visitor, we will have to change this code. - for i, statement in enumerate(orig_module.body): + + # Finds the location to add imports. 
It is the end of the first import block that occurs before any other statement (save for docstrings) + + # Never insert an import before initial __strict__ flag or docstring + if _skip_first(orig_module): + statement_before_import_location = import_add_location = 1 + + for i, statement in enumerate( + orig_module.body[statement_before_import_location:] + ): if m.matches( - statement, m.SimpleStatementLine(body=[m.Expr(value=m.SimpleString())]) + statement, m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()]) ): - statement_before_import_location = import_add_location = 1 - elif isinstance(statement, libcst.SimpleStatementLine): - for possible_import in statement.body: - for last_import in self.all_imports: - if possible_import is last_import: - import_add_location = i + 1 - break + import_add_location = i + statement_before_import_location + 1 + else: + break return ( list(updated_module.body[:statement_before_import_location]), @@ -357,9 +395,9 @@ class AddImportsVisitor(ContextAwareTransformer): module: sorted(aliases) for module, aliases in module_and_alias_mapping.items() } - # import ptvsd; ptvsd.set_trace() # Now, add all of the imports we need! return updated_node.with_changes( + # pyre-fixme[60]: Concatenation not yet support for multiple variadic tup... body=( *statements_before_imports, *[ @@ -407,3 +445,28 @@ class AddImportsVisitor(ContextAwareTransformer): *statements_after_imports, ) ) + + +def _skip_first(orig_module: libcst.Module) -> bool: + # Is there a __strict__ flag or docstring at the top? 
+ if m.matches( + orig_module, + m.Module( + body=[ + m.SimpleStatementLine( + body=[ + m.Assign(targets=[m.AssignTarget(target=m.Name("__strict__"))]) + ] + ), + m.ZeroOrMore(), + ] + ) + | m.Module( + body=[ + m.SimpleStatementLine(body=[m.Expr(value=m.SimpleString())]), + m.ZeroOrMore(), + ] + ), + ): + return True + return False diff --git a/libcst/codemod/visitors/_apply_type_annotations.py b/libcst/codemod/visitors/_apply_type_annotations.py index 2090c151..59347420 100644 --- a/libcst/codemod/visitors/_apply_type_annotations.py +++ b/libcst/codemod/visitors/_apply_type_annotations.py @@ -1,22 +1,74 @@ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree -# +# LICENSE file in the root directory of this source tree. -from dataclasses import dataclass, field +from collections import defaultdict +from dataclasses import dataclass from typing import Dict, List, Optional, Sequence, Set, Tuple, Union import libcst as cst -from libcst import matchers as m +import libcst.matchers as m + from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer from libcst.codemod.visitors._add_imports import AddImportsVisitor +from libcst.codemod.visitors._gather_global_names import GatherGlobalNamesVisitor from libcst.codemod.visitors._gather_imports import GatherImportsVisitor +from libcst.codemod.visitors._imports import ImportItem from libcst.helpers import get_full_name_for_node +from libcst.metadata import PositionProvider, QualifiedNameProvider -def _get_import_alias_names(import_aliases: Sequence[cst.ImportAlias]) -> Set[str]: +NameOrAttribute = Union[cst.Name, cst.Attribute] +NAME_OR_ATTRIBUTE = (cst.Name, cst.Attribute) +# Union type for *args and **args +StarParamType = Union[ + None, + cst._maybe_sentinel.MaybeSentinel, + cst._nodes.expression.Param, 
+ cst._nodes.expression.ParamStar, +] + + +def _module_and_target(qualified_name: str) -> Tuple[str, str]: + relative_prefix = "" + while qualified_name.startswith("."): + relative_prefix += "." + qualified_name = qualified_name[1:] + split = qualified_name.rsplit(".", 1) + if len(split) == 1: + qualifier, target = "", split[0] + else: + qualifier, target = split + return (relative_prefix + qualifier, target) + + +def _get_unique_qualified_name( + visitor: m.MatcherDecoratableVisitor, node: cst.CSTNode +) -> str: + name = None + names = [q.name for q in visitor.get_metadata(QualifiedNameProvider, node)] + if len(names) == 0: + # we hit this branch if the stub is directly using a fully + # qualified name, which is not technically valid python but is + # convenient to allow. + name = get_full_name_for_node(node) + elif len(names) == 1 and isinstance(names[0], str): + name = names[0] + if name is None: + start = visitor.get_metadata(PositionProvider, node).start + raise ValueError( + "Could not resolve a unique qualified name for type " + + f"{get_full_name_for_node(node)} at {start.line}:{start.column}. " + + f"Candidate names were: {names!r}" + ) + return name + + +def _get_import_alias_names( + import_aliases: Sequence[cst.ImportAlias], +) -> Set[str]: import_names = set() for imported_name in import_aliases: asname = imported_name.asname @@ -27,7 +79,17 @@ def _get_import_alias_names(import_aliases: Sequence[cst.ImportAlias]) -> Set[st return import_names -def _get_import_names(imports: Sequence[Union[cst.Import, cst.ImportFrom]]) -> Set[str]: +def _get_imported_names( + imports: Sequence[Union[cst.Import, cst.ImportFrom]], +) -> Set[str]: + """ + Given a series of import statements (both Import and ImportFrom), + determine all of the names that have been imported into the current + scope. 
For example: + - ``import foo.bar as bar, foo.baz`` produces ``{'bar', 'foo.baz'}`` + - ``from foo import (Bar, Baz as B)`` produces ``{'Bar', 'B'}`` + - ``from foo import *`` produces ``set()` because we cannot resolve names + """ import_names = set() for _import in imports: if isinstance(_import, cst.Import): @@ -39,149 +101,428 @@ def _get_import_names(imports: Sequence[Union[cst.Import, cst.ImportFrom]]) -> S return import_names +def _is_non_sentinel( + x: Union[None, cst.CSTNode, cst.MaybeSentinel], +) -> bool: + return x is not None and x != cst.MaybeSentinel.DEFAULT + + +def _get_string_value( + node: cst.SimpleString, +) -> str: + s = node.value + c = s[-1] + return s[s.index(c) : -1] + + +def _find_generic_base( + node: cst.ClassDef, +) -> Optional[cst.Arg]: + for b in node.bases: + if m.matches(b.value, m.Subscript(value=m.Name("Generic"))): + return b + + +@dataclass(frozen=True) +class FunctionKey: + """ + Class representing a funciton name and signature. + + This exists to ensure we do not attempt to apply stubs to functions whose + definition is incompatible. 
+ """ + + name: str + pos: int + kwonly: str + posonly: int + star_arg: bool + star_kwarg: bool + + @classmethod + def make( + cls, + name: str, + params: cst.Parameters, + ) -> "FunctionKey": + pos = len(params.params) + kwonly = ",".join(sorted(x.name.value for x in params.kwonly_params)) + posonly = len(params.posonly_params) + star_arg = _is_non_sentinel(params.star_arg) + star_kwarg = _is_non_sentinel(params.star_kwarg) + return cls( + name, + pos, + kwonly, + posonly, + star_arg, + star_kwarg, + ) + + @dataclass(frozen=True) class FunctionAnnotation: parameters: cst.Parameters returns: Optional[cst.Annotation] -class TypeCollector(cst.CSTVisitor): +@dataclass +class Annotations: + """ + Represents all of the annotation information we might add to + a class: + - All data is keyed on the qualified name relative to the module root + - The ``functions`` field also keys on the signature so that we + do not apply stub types where the signature is incompatible. + + The idea is that + - ``functions`` contains all function and method type + information from the stub, and the qualifier for a method includes + the containing class names (e.g. "Cat.meow") + - ``attributes`` similarly contains all globals + and class-level attribute type information. + - The ``class_definitions`` field contains all of the classes + defined in the stub. Most of these classes will be ignored in + downstream logic (it is *not* used to annotate attributes or + method), but there are some cases like TypedDict where a + typing-only class needs to be injected. + - The field ``typevars`` contains the assign statement for all + type variables in the stub, and ``names`` tracks + all of the names used in annotations; together these fields + tell us which typevars should be included in the codemod + (all typevars that appear in annotations.) + """ + + # TODO: consider simplifying this in a few ways: + # - We could probably just inject all typevars, used or not. 
+ # It doesn't seem to me that our codemod needs to act like + # a linter checking for unused names. + # - We could probably decide which classes are typing-only + # in the visitor rather than the codemod, which would make + # it easier to reason locally about (and document) how the + # class_definitions field works. + + functions: Dict[FunctionKey, FunctionAnnotation] + attributes: Dict[str, cst.Annotation] + class_definitions: Dict[str, cst.ClassDef] + typevars: Dict[str, cst.Assign] + names: Set[str] + + @classmethod + def empty(cls) -> "Annotations": + return Annotations({}, {}, {}, {}, set()) + + def update(self, other: "Annotations") -> None: + self.functions.update(other.functions) + self.attributes.update(other.attributes) + self.class_definitions.update(other.class_definitions) + self.typevars.update(other.typevars) + self.names.update(other.names) + + def finish(self) -> None: + self.typevars = {k: v for k, v in self.typevars.items() if k in self.names} + + +@dataclass(frozen=True) +class ImportedSymbol: + """Import of foo.Bar, where both foo and Bar are potentially aliases.""" + + module_name: str + module_alias: Optional[str] = None + target_name: Optional[str] = None + target_alias: Optional[str] = None + + @property + def symbol(self) -> Optional[str]: + return self.target_alias or self.target_name + + @property + def module_symbol(self) -> str: + return self.module_alias or self.module_name + + +class ImportedSymbolCollector(m.MatcherDecoratableVisitor): + """ + Collect imported symbols from a stub module. 
+ """ + + METADATA_DEPENDENCIES = ( + PositionProvider, + QualifiedNameProvider, + ) + + def __init__(self, existing_imports: Set[str], context: CodemodContext) -> None: + super().__init__() + self.existing_imports: Set[str] = existing_imports + self.imported_symbols: Dict[str, Set[ImportedSymbol]] = defaultdict(set) + self.in_annotation: bool = False + + def visit_Annotation(self, node: cst.Annotation) -> None: + self.in_annotation = True + + def leave_Annotation(self, original_node: cst.Annotation) -> None: + self.in_annotation = False + + def visit_ClassDef(self, node: cst.ClassDef) -> None: + for base in node.bases: + value = base.value + if isinstance(value, NAME_OR_ATTRIBUTE): + self._handle_NameOrAttribute(value) + + def visit_Name(self, node: cst.Name) -> None: + if self.in_annotation: + self._handle_NameOrAttribute(node) + + def visit_Attribute(self, node: cst.Attribute) -> None: + if self.in_annotation: + self._handle_NameOrAttribute(node) + + def visit_Subscript(self, node: cst.Subscript) -> bool: + if isinstance(node.value, NAME_OR_ATTRIBUTE): + return True + return _get_unique_qualified_name(self, node) not in ("Type", "typing.Type") + + def _handle_NameOrAttribute( + self, + node: NameOrAttribute, + ) -> None: + # Adds the qualified name to the list of imported symbols + obj = sym = None # keep pyre happy + if isinstance(node, cst.Name): + obj = None + sym = node.value + elif isinstance(node, cst.Attribute): + obj = node.value.value # pyre-ignore[16] + sym = node.attr.value + qualified_name = _get_unique_qualified_name(self, node) + module, target = _module_and_target(qualified_name) + if module in ("", "builtins"): + return + elif qualified_name not in self.existing_imports: + mod = ImportedSymbol( + module_name=module, + module_alias=obj if obj != module else None, + target_name=target, + target_alias=sym if sym != target else None, + ) + self.imported_symbols[sym].add(mod) + + +class TypeCollector(m.MatcherDecoratableVisitor): """ Collect type 
annotations from a stub module. """ - def __init__(self, existing_imports: Set[str], context: CodemodContext) -> None: - # Qualifier for storing the canonical name of the current function. - self.qualifier: List[str] = [] - # Store the annotations. - self.function_annotations: Dict[str, FunctionAnnotation] = {} - self.attribute_annotations: Dict[str, cst.Annotation] = {} - self.existing_imports: Set[str] = existing_imports - self.class_definitions: Dict[str, cst.ClassDef] = {} + METADATA_DEPENDENCIES = ( + PositionProvider, + QualifiedNameProvider, + ) + + annotations: Annotations + + def __init__( + self, + existing_imports: Set[str], + module_imports: Dict[str, ImportItem], + context: CodemodContext, + ) -> None: + super().__init__() self.context = context + # Existing imports, determined by looking at the target module. + # Used to help us determine when a type in a stub will require new imports. + # + # The contents of this are fully-qualified names of types in scope + # as well as module names, although downstream we effectively ignore + # the module names as of the current implementation. + self.existing_imports: Set[str] = existing_imports + # Module imports, gathered by prescanning the stub file to determine + # which modules need to be imported directly to qualify their symbols. + self.module_imports: Dict[str, ImportItem] = module_imports + # Fields that help us track temporary state as we recurse + self.qualifier: List[str] = [] + self.current_assign: Optional[cst.Assign] = None # used to collect typevars + # Store the annotations. 
+ self.annotations = Annotations.empty() - def visit_ClassDef(self, node: cst.ClassDef) -> None: + def visit_ClassDef( + self, + node: cst.ClassDef, + ) -> None: self.qualifier.append(node.name.value) - self.class_definitions[node.name.value] = node + new_bases = [] + for base in node.bases: + value = base.value + if isinstance(value, NAME_OR_ATTRIBUTE): + new_value = value.visit(_TypeCollectorDequalifier(self)) + elif isinstance(value, cst.Subscript): + new_value = value.visit(_TypeCollectorDequalifier(self)) + else: + start = self.get_metadata(PositionProvider, node).start + raise ValueError( + "Invalid type used as base class in stub file at " + + f"{start.line}:{start.column}. Only subscripts, names, and " + + "attributes are valid base classes for static typing." + ) + new_bases.append(base.with_changes(value=new_value)) - def leave_ClassDef(self, original_node: cst.ClassDef) -> None: + self.annotations.class_definitions[node.name.value] = node.with_changes( + bases=new_bases + ) + + def leave_ClassDef( + self, + original_node: cst.ClassDef, + ) -> None: self.qualifier.pop() - def visit_FunctionDef(self, node: cst.FunctionDef) -> bool: + def visit_FunctionDef( + self, + node: cst.FunctionDef, + ) -> bool: self.qualifier.append(node.name.value) returns = node.returns - if returns is not None: - return_annotation = self._create_import_from_annotation(returns) - parameter_annotations = self._import_parameter_annotations(node.params) - self.function_annotations[".".join(self.qualifier)] = FunctionAnnotation( - parameters=parameter_annotations, returns=return_annotation - ) + return_annotation = ( + returns.visit(_TypeCollectorDequalifier(self)) + if returns is not None + else None + ) + assert return_annotation is None or isinstance( + return_annotation, cst.Annotation + ) + parameter_annotations = self._handle_Parameters(node.params) + name = ".".join(self.qualifier) + key = FunctionKey.make(name, node.params) + self.annotations.functions[key] = 
FunctionAnnotation( + parameters=parameter_annotations, returns=return_annotation + ) + # pyi files don't support inner functions, return False to stop the traversal. return False - def leave_FunctionDef(self, original_node: cst.FunctionDef) -> None: + def leave_FunctionDef( + self, + original_node: cst.FunctionDef, + ) -> None: self.qualifier.pop() - def visit_AnnAssign(self, node: cst.AnnAssign) -> bool: + def visit_AnnAssign( + self, + node: cst.AnnAssign, + ) -> bool: name = get_full_name_for_node(node.target) if name is not None: self.qualifier.append(name) - annotation_value = self._create_import_from_annotation(node.annotation) - self.attribute_annotations[".".join(self.qualifier)] = annotation_value + annotation_value = node.annotation.visit(_TypeCollectorDequalifier(self)) + assert isinstance(annotation_value, cst.Annotation) + self.annotations.attributes[".".join(self.qualifier)] = annotation_value return True - def leave_AnnAssign(self, original_node: cst.AnnAssign) -> None: + def leave_AnnAssign( + self, + original_node: cst.AnnAssign, + ) -> None: self.qualifier.pop() - def visit_ImportFrom(self, node: cst.ImportFrom) -> None: - module = node.module - names = node.names + def visit_Assign( + self, + node: cst.Assign, + ) -> None: + self.current_assign = node - # module is None for relative imports like `from .. import foo`. - # We ignore these for now. - if module is None or isinstance(names, cst.ImportStar): - return - module_name = get_full_name_for_node(module) - if module_name is not None: - for import_name in _get_import_alias_names(names): - AddImportsVisitor.add_needed_import( - self.context, module_name, import_name - ) + def leave_Assign( + self, + original_node: cst.Assign, + ) -> None: + self.current_assign = None - def _add_annotation_to_imports( - self, annotation: cst.Attribute - ) -> Union[cst.Name, cst.Attribute]: - key = get_full_name_for_node(annotation.value) - if key is not None: - # Don't attempt to re-import existing imports. 
- if key in self.existing_imports: - return annotation - import_name = get_full_name_for_node(annotation.attr) - if import_name is not None: - AddImportsVisitor.add_needed_import(self.context, key, import_name) - return annotation.attr + @m.call_if_inside(m.Assign()) + @m.visit(m.Call(func=m.Name("TypeVar"))) + def record_typevar( + self, + node: cst.Call, + ) -> None: + # pyre-ignore current_assign is never None here + name = get_full_name_for_node(self.current_assign.targets[0].target) + if name is not None: + # pyre-ignore current_assign is never None here + self.annotations.typevars[name] = self.current_assign + self._handle_qualification_and_should_qualify("typing.TypeVar") + self.current_assign = None - def _handle_Index(self, slice: cst.Index, node: cst.Subscript) -> cst.Subscript: - value = slice.value - if isinstance(value, cst.Subscript): - new_slice = slice.with_changes(value=self._handle_Subscript(value)) - return node.with_changes(slice=new_slice) - elif isinstance(value, cst.Attribute): - new_slice = slice.with_changes(value=self._add_annotation_to_imports(value)) - return node.with_changes(slice=new_slice) + def leave_Module( + self, + original_node: cst.Module, + ) -> None: + self.annotations.finish() + + def _module_and_target( + self, + qualified_name: str, + ) -> Tuple[str, str]: + relative_prefix = "" + while qualified_name.startswith("."): + relative_prefix += "." 
+ qualified_name = qualified_name[1:] + split = qualified_name.rsplit(".", 1) + if len(split) == 1: + qualifier, target = "", split[0] else: - return node + qualifier, target = split + return (relative_prefix + qualifier, target) - def _handle_Subscript(self, node: cst.Subscript) -> cst.Subscript: - slice = node.slice - if m.matches(node.value, m.Name(value="Type")): - return node - if isinstance(slice, list): - new_slice = [] - for item in slice: - value = item.slice.value - if isinstance(value, cst.Attribute): - name = self._add_annotation_to_imports(item.slice.value) - new_index = item.slice.with_changes(value=name) - new_slice.append(item.with_changes(slice=new_index)) + def _handle_qualification_and_should_qualify( + self, qualified_name: str, node: Optional[cst.CSTNode] = None + ) -> bool: + """ + Based on a qualified name and the existing module imports, record that + we need to add an import if necessary and return whether or not we + should use the qualified name due to a preexisting import. 
+ """ + module, target = self._module_and_target(qualified_name) + if module in ("", "builtins"): + return False + elif qualified_name not in self.existing_imports: + if module in self.existing_imports: + return True + elif module in self.module_imports: + m = self.module_imports[module] + if m.obj_name is None: + asname = m.alias else: - if isinstance(item.slice, cst.Index) and not isinstance( - item.slice.value, cst.Name - ): - new_index = item.slice.with_changes( - value=self._handle_Index(item.slice, item) - ) - item = item.with_changes(slice=new_index, comma=None) - new_slice.append(item) - return node.with_changes(slice=new_slice) - elif isinstance(slice, cst.Index): - return self._handle_Index(slice, node) - else: - return node + asname = None + AddImportsVisitor.add_needed_import( + self.context, m.module_name, asname=asname + ) + return True + else: + if node and isinstance(node, cst.Name) and node.value != target: + asname = node.value + else: + asname = None + AddImportsVisitor.add_needed_import( + self.context, + module, + target, + asname=asname, + ) + return False + return False - def _create_import_from_annotation(self, returns: cst.Annotation) -> cst.Annotation: - annotation = returns.annotation - if isinstance(annotation, cst.Attribute): - attr = self._add_annotation_to_imports(annotation) - return cst.Annotation(annotation=attr) - if isinstance(annotation, cst.Subscript): - value = annotation.value - if m.matches(value, m.Name(value="Type")): - return returns - return cst.Annotation(annotation=self._handle_Subscript(annotation)) - else: - return returns + # Handler functions - def _import_parameter_annotations( - self, parameters: cst.Parameters + def _handle_Parameters( + self, + parameters: cst.Parameters, ) -> cst.Parameters: - def update_annotations(parameters: Sequence[cst.Param]) -> List[cst.Param]: + def update_annotations( + parameters: Sequence[cst.Param], + ) -> List[cst.Param]: updated_parameters = [] for parameter in list(parameters): 
annotation = parameter.annotation if annotation is not None: parameter = parameter.with_changes( - annotation=self._create_import_from_annotation(annotation) + annotation=annotation.visit(_TypeCollectorDequalifier(self)) ) updated_parameters.append(parameter) return updated_parameters @@ -189,11 +530,91 @@ class TypeCollector(cst.CSTVisitor): return parameters.with_changes(params=update_annotations(parameters.params)) -@dataclass(frozen=True) -class Annotations: - function_annotations: Dict[str, FunctionAnnotation] = field(default_factory=dict) - attribute_annotations: Dict[str, cst.Annotation] = field(default_factory=dict) - class_definitions: Dict[str, cst.ClassDef] = field(default_factory=dict) +class _TypeCollectorDequalifier(cst.CSTTransformer): + def __init__(self, type_collector: "TypeCollector") -> None: + self.type_collector = type_collector + + def leave_Name( + self, original_node: cst.Name, updated_node: cst.Name + ) -> NameOrAttribute: + qualified_name = _get_unique_qualified_name(self.type_collector, original_node) + should_qualify = self.type_collector._handle_qualification_and_should_qualify( + qualified_name, original_node + ) + self.type_collector.annotations.names.add(qualified_name) + if should_qualify: + parts = qualified_name.split(".") + qualified_node = cst.Name(parts[0]) + for p in parts[1:]: + qualified_node = cst.Attribute(qualified_node, cst.Name(p)) + return qualified_node + else: + return original_node + + def visit_Attribute(self, node: cst.Attribute) -> bool: + return False + + def leave_Attribute( + self, original_node: cst.Attribute, updated_node: cst.Attribute + ) -> cst.BaseExpression: + qualified_name = _get_unique_qualified_name(self.type_collector, original_node) + should_qualify = self.type_collector._handle_qualification_and_should_qualify( + qualified_name, original_node + ) + self.type_collector.annotations.names.add(qualified_name) + if should_qualify: + return original_node + else: + return original_node.attr + + def 
leave_Index( + self, original_node: cst.Index, updated_node: cst.Index + ) -> cst.Index: + if isinstance(original_node.value, cst.SimpleString): + self.type_collector.annotations.names.add( + _get_string_value(original_node.value) + ) + return updated_node + + def visit_Subscript(self, node: cst.Subscript) -> bool: + return _get_unique_qualified_name(self.type_collector, node) not in ( + "Type", + "typing.Type", + ) + + def leave_Subscript( + self, original_node: cst.Subscript, updated_node: cst.Subscript + ) -> cst.Subscript: + if _get_unique_qualified_name(self.type_collector, original_node) in ( + "Type", + "typing.Type", + ): + # Note: we are intentionally not handling qualification of + # anything inside `Type` because it's common to have nested + # classes, which we cannot currently distinguish from classes + # coming from other modules, appear here. + return original_node.with_changes(value=original_node.value.visit(self)) + return updated_node + + +@dataclass +class AnnotationCounts: + global_annotations: int = 0 + attribute_annotations: int = 0 + parameter_annotations: int = 0 + return_annotations: int = 0 + classes_added: int = 0 + typevars_and_generics_added: int = 0 + + def any_changes_applied(self) -> bool: + return ( + self.global_annotations + + self.attribute_annotations + + self.parameter_annotations + + self.return_annotations + + self.classes_added + + self.typevars_and_generics_added + ) > 0 class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): @@ -204,10 +625,12 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): This is one of the transforms that is available automatically to you when running a codemod. 
To use it in this manner, import - :class:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor` and then call the static - :meth:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor.store_stub_in_context` method, - giving it the current context (found as ``self.context`` for all subclasses of - :class:`~libcst.codemod.Codemod`), the stub module from which you wish to add annotations. + :class:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor` and then call + the static + :meth:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor.store_stub_in_context` + method, giving it the current context (found as ``self.context`` for all + subclasses of :class:`~libcst.codemod.Codemod`), the stub module from which + you wish to add annotations. For example, you can store the type annotation ``int`` for ``x`` using:: @@ -224,7 +647,8 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): x: int = 1 - If the function or attribute already has a type annotation, it will not be overwritten. + If the function or attribute already has a type annotation, it will not be + overwritten. To overwrite existing annotations when applying annotations from a stub, use the keyword argument ``overwrite_existing_annotations=True`` when @@ -238,37 +662,56 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): context: CodemodContext, annotations: Optional[Annotations] = None, overwrite_existing_annotations: bool = False, + use_future_annotations: bool = False, + strict_posargs_matching: bool = True, + strict_annotation_matching: bool = False, + always_qualify_annotations: bool = False, ) -> None: super().__init__(context) # Qualifier for storing the canonical name of the current function. 
self.qualifier: List[str] = [] self.annotations: Annotations = ( - Annotations() if annotations is None else annotations + Annotations.empty() if annotations is None else annotations ) self.toplevel_annotations: Dict[str, cst.Annotation] = {} self.visited_classes: Set[str] = set() self.overwrite_existing_annotations = overwrite_existing_annotations + self.use_future_annotations = use_future_annotations + self.strict_posargs_matching = strict_posargs_matching + self.strict_annotation_matching = strict_annotation_matching + self.always_qualify_annotations = always_qualify_annotations # We use this to determine the end of the import block so that we can # insert top-level annotations. self.import_statements: List[cst.ImportFrom] = [] - @staticmethod - def store_stub_in_context( - context: CodemodContext, - stub: cst.Module, - overwrite_existing_annotations: bool = False, - ) -> None: - # deprecated, should be removed in 0.4 release. - ApplyTypeAnnotationsVisitor.store_stub_in_context( - context, stub, overwrite_existing_annotations - ) + # We use this to report annotations added, as well as to determine + # whether to abandon the codemod in edge cases where we may have + # only made changes to the imports. + self.annotation_counts: AnnotationCounts = AnnotationCounts() + + # We use this to collect typevars, to avoid importing existing ones from the pyi file + self.current_assign: Optional[cst.Assign] = None + self.typevars: Dict[str, cst.Assign] = {} + + # Global variables and classes defined on the toplevel of the target module. + # Used to help determine which names we need to check are in scope, and add + # quotations to avoid undefined forward references in type annotations. 
+ self.global_names: Set[str] = set() + + # We use this to avoid annotating multiple assignments to the same + # symbol in a given scope + self.already_annotated: Set[str] = set() @staticmethod def store_stub_in_context( context: CodemodContext, stub: cst.Module, overwrite_existing_annotations: bool = False, + use_future_annotations: bool = False, + strict_posargs_matching: bool = True, + strict_annotation_matching: bool = False, + always_qualify_annotations: bool = False, ) -> None: """ Store a stub module in the :class:`~libcst.codemod.CodemodContext` so @@ -284,47 +727,226 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): context.scratch[ApplyTypeAnnotationsVisitor.CONTEXT_KEY] = ( stub, overwrite_existing_annotations, + use_future_annotations, + strict_posargs_matching, + strict_annotation_matching, + always_qualify_annotations, ) - def transform_module_impl(self, tree: cst.Module) -> cst.Module: + def transform_module_impl( + self, + tree: cst.Module, + ) -> cst.Module: """ Collect type annotations from all stubs and apply them to ``tree``. Gather existing imports from ``tree`` so that we don't add duplicate imports. + + Gather global names from ``tree`` so forward references are quoted. 
""" import_gatherer = GatherImportsVisitor(CodemodContext()) tree.visit(import_gatherer) - existing_import_names = _get_import_names(import_gatherer.all_imports) + existing_import_names = _get_imported_names(import_gatherer.all_imports) + + global_names_gatherer = GatherGlobalNamesVisitor(CodemodContext()) + tree.visit(global_names_gatherer) + self.global_names = global_names_gatherer.global_names.union( + global_names_gatherer.class_names + ) context_contents = self.context.scratch.get( ApplyTypeAnnotationsVisitor.CONTEXT_KEY ) if context_contents is not None: - stub, overwrite_existing_annotations = context_contents + ( + stub, + overwrite_existing_annotations, + use_future_annotations, + strict_posargs_matching, + strict_annotation_matching, + always_qualify_annotations, + ) = context_contents self.overwrite_existing_annotations = ( self.overwrite_existing_annotations or overwrite_existing_annotations ) - visitor = TypeCollector(existing_import_names, self.context) - stub.visit(visitor) - self.annotations.function_annotations.update(visitor.function_annotations) - self.annotations.attribute_annotations.update(visitor.attribute_annotations) - self.annotations.class_definitions.update(visitor.class_definitions) + self.use_future_annotations = ( + self.use_future_annotations or use_future_annotations + ) + self.strict_posargs_matching = ( + self.strict_posargs_matching and strict_posargs_matching + ) + self.strict_annotation_matching = ( + self.strict_annotation_matching or strict_annotation_matching + ) + self.always_qualify_annotations = ( + self.always_qualify_annotations or always_qualify_annotations + ) + module_imports = self._get_module_imports(stub, import_gatherer) + visitor = TypeCollector(existing_import_names, module_imports, self.context) + cst.MetadataWrapper(stub).visit(visitor) + self.annotations.update(visitor.annotations) - tree_with_imports = AddImportsVisitor(self.context).transform_module(tree) - return tree_with_imports.visit(self) + if 
self.use_future_annotations: + AddImportsVisitor.add_needed_import( + self.context, "__future__", "annotations" + ) + tree_with_imports = AddImportsVisitor(self.context).transform_module(tree) + + tree_with_changes = tree_with_imports.visit(self) + + # don't modify the imports if we didn't actually add any type information + if self.annotation_counts.any_changes_applied(): + return tree_with_changes + else: + return tree + + # helpers for collecting type information from the stub files + + def _get_module_imports( # noqa: C901: too complex + self, stub: cst.Module, existing_import_gatherer: GatherImportsVisitor + ) -> Dict[str, ImportItem]: + """Returns a dict of modules that need to be imported to qualify symbols.""" + # We correlate all imported symbols, e.g. foo.bar.Baz, with a list of module + # and from imports. If the same unqualified symbol is used from different + # modules, we give preference to an explicit from-import if any, and qualify + # everything else by importing the module. + # + # e.g. the following stub: + # import foo as quux + # from bar import Baz as X + # def f(x: X) -> quux.X: ... + # will return {'foo': ImportItem("foo", "quux")}. When the apply type + # annotation visitor hits `quux.X` it will retrieve the canonical name + # `foo.X` and then note that `foo` is in the module imports map, so it will + # leave the symbol qualified. 
+ import_gatherer = GatherImportsVisitor(CodemodContext()) + stub.visit(import_gatherer) + symbol_map = import_gatherer.symbol_mapping + existing_import_names = _get_imported_names( + existing_import_gatherer.all_imports + ) + symbol_collector = ImportedSymbolCollector(existing_import_names, self.context) + cst.MetadataWrapper(stub).visit(symbol_collector) + module_imports = {} + for sym, imported_symbols in symbol_collector.imported_symbols.items(): + existing = existing_import_gatherer.symbol_mapping.get(sym) + if existing and any( + s.module_name != existing.module_name for s in imported_symbols + ): + # If a symbol is imported in the main file, we have to qualify + # it when imported from a different module in the stub file. + used = True + elif len(imported_symbols) == 1 and not self.always_qualify_annotations: + # If we have a single use of a new symbol we can from-import it + continue + else: + # There are multiple occurrences in the stub file and none in + # the main file. At least one can be from-imported. + used = False + for imp_sym in imported_symbols: + if not imp_sym.symbol: + continue + imp = symbol_map.get(imp_sym.symbol) + if self.always_qualify_annotations and sym not in existing_import_names: + # Override 'always qualify' if this is a typing import, or + # the main file explicitly from-imports a symbol. + if imp and imp.module_name != "typing": + module_imports[imp.module_name] = imp + else: + imp = symbol_map.get(imp_sym.module_symbol) + if imp: + module_imports[imp.module_name] = imp + elif not used and imp and imp.module_name == imp_sym.module_name: + # We can only import a symbol directly once. + used = True + elif sym in existing_import_names: + if imp: + module_imports[imp.module_name] = imp + else: + imp = symbol_map.get(imp_sym.module_symbol) + if imp: + # imp will be None in corner cases like + # import foo.bar as Baz + # x: Baz + # which is technically valid python but nonsensical as a + # type annotation. 
Dropping it on the floor for now. + module_imports[imp.module_name] = imp + return module_imports + + # helpers for processing annotation nodes + def _quote_future_annotations(self, annotation: cst.Annotation) -> cst.Annotation: + # TODO: We probably want to make sure references to classes defined in the current + # module come to us fully qualified - so we can do the dequalification here and + # know to look for what is in-scope without also catching builtins like "None" in the + # quoting. This should probably also be extended to handle what imports are in scope, + # as well as subscriptable types. + # Note: We are collecting all imports and passing this to the type collector grabbing + # annotations from the stub file; should consolidate import handling somewhere too. + node = annotation.annotation + if ( + isinstance(node, cst.Name) + and (node.value in self.global_names) + and not (node.value in self.visited_classes) + ): + return annotation.with_changes( + annotation=cst.SimpleString(value=f'"{node.value}"') + ) + return annotation + + # smart constructors: all applied annotations happen via one of these + + def _apply_annotation_to_attribute_or_global( + self, + name: str, + annotation: cst.Annotation, + value: Optional[cst.BaseExpression], + ) -> cst.AnnAssign: + if len(self.qualifier) == 0: + self.annotation_counts.global_annotations += 1 + else: + self.annotation_counts.attribute_annotations += 1 + return cst.AnnAssign( + cst.Name(name), + self._quote_future_annotations(annotation), + value, + ) + + def _apply_annotation_to_parameter( + self, + parameter: cst.Param, + annotation: cst.Annotation, + ) -> cst.Param: + self.annotation_counts.parameter_annotations += 1 + return parameter.with_changes( + annotation=self._quote_future_annotations(annotation), + ) + + def _apply_annotation_to_return( + self, + function_def: cst.FunctionDef, + annotation: cst.Annotation, + ) -> cst.FunctionDef: + self.annotation_counts.return_annotations += 1 + return 
function_def.with_changes( + returns=self._quote_future_annotations(annotation), + ) + + # private methods used in the visit and leave methods def _qualifier_name(self) -> str: return ".".join(self.qualifier) def _annotate_single_target( - self, node: cst.Assign, updated_node: cst.Assign + self, + node: cst.Assign, + updated_node: cst.Assign, ) -> Union[cst.Assign, cst.AnnAssign]: only_target = node.targets[0].target if isinstance(only_target, (cst.Tuple, cst.List)): for element in only_target.elements: value = element.value name = get_full_name_for_node(value) - if name: + if name is not None and name != "_": self._add_to_toplevel_annotations(name) elif isinstance(only_target, (cst.Subscript)): pass @@ -332,21 +954,27 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): name = get_full_name_for_node(only_target) if name is not None: self.qualifier.append(name) - if ( - self._qualifier_name() in self.annotations.attribute_annotations - and not isinstance(only_target, cst.Subscript) + qualifier_name = self._qualifier_name() + if qualifier_name in self.annotations.attributes and not isinstance( + only_target, (cst.Attribute, cst.Subscript) ): - annotation = self.annotations.attribute_annotations[ - self._qualifier_name() - ] - self.qualifier.pop() - return cst.AnnAssign(cst.Name(name), annotation, node.value) + if qualifier_name not in self.already_annotated: + self.already_annotated.add(qualifier_name) + annotation = self.annotations.attributes[qualifier_name] + self.qualifier.pop() + return self._apply_annotation_to_attribute_or_global( + name=name, + annotation=annotation, + value=node.value, + ) else: self.qualifier.pop() return updated_node def _split_module( - self, module: cst.Module, updated_module: cst.Module + self, + module: cst.Module, + updated_module: cst.Module, ) -> Tuple[ List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], @@ -369,48 +997,65 @@ class 
ApplyTypeAnnotationsVisitor(ContextAwareTransformer): list(updated_module.body[import_add_location:]), ) - def _add_to_toplevel_annotations(self, name: str) -> None: + def _add_to_toplevel_annotations( + self, + name: str, + ) -> None: self.qualifier.append(name) - if self._qualifier_name() in self.annotations.attribute_annotations: - annotation = self.annotations.attribute_annotations[self._qualifier_name()] + if self._qualifier_name() in self.annotations.attributes: + annotation = self.annotations.attributes[self._qualifier_name()] self.toplevel_annotations[name] = annotation self.qualifier.pop() def _update_parameters( - self, annotations: FunctionAnnotation, updated_node: cst.FunctionDef + self, + annotations: FunctionAnnotation, + updated_node: cst.FunctionDef, ) -> cst.Parameters: # Update params and default params with annotations # Don't override existing annotations or default values unless asked # to overwrite existing annotations. def update_annotation( - parameters: Sequence[cst.Param], annotations: Sequence[cst.Param] + parameters: Sequence[cst.Param], + annotations: Sequence[cst.Param], + positional: bool, ) -> List[cst.Param]: parameter_annotations = {} annotated_parameters = [] - for parameter in annotations: + positional = positional and not self.strict_posargs_matching + for i, parameter in enumerate(annotations): + key = i if positional else parameter.name.value if parameter.annotation: - parameter_annotations[parameter.name.value] = parameter.annotation - for parameter in parameters: - key = parameter.name.value + parameter_annotations[key] = parameter.annotation.with_changes( + whitespace_before_indicator=cst.SimpleWhitespace(value="") + ) + for i, parameter in enumerate(parameters): + key = i if positional else parameter.name.value if key in parameter_annotations and ( self.overwrite_existing_annotations or not parameter.annotation ): - parameter = parameter.with_changes( - annotation=parameter_annotations[key] + parameter = 
self._apply_annotation_to_parameter( + parameter=parameter, + annotation=parameter_annotations[key], ) annotated_parameters.append(parameter) return annotated_parameters - return annotations.parameters.with_changes( + return updated_node.params.with_changes( params=update_annotation( - updated_node.params.params, annotations.parameters.params + updated_node.params.params, + annotations.parameters.params, + positional=True, ), kwonly_params=update_annotation( - updated_node.params.kwonly_params, annotations.parameters.kwonly_params + updated_node.params.kwonly_params, + annotations.parameters.kwonly_params, + positional=False, ), posonly_params=update_annotation( updated_node.params.posonly_params, annotations.parameters.posonly_params, + positional=True, ), ) @@ -438,49 +1083,182 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): *statements[1:], ] - def visit_ClassDef(self, node: cst.ClassDef) -> None: + def _match_signatures( # noqa: C901: Too complex + self, + function: cst.FunctionDef, + annotations: FunctionAnnotation, + ) -> bool: + """Check that function annotations on both signatures are compatible.""" + + def compatible( + p: Optional[cst.Annotation], + q: Optional[cst.Annotation], + ) -> bool: + if ( + self.overwrite_existing_annotations + or not _is_non_sentinel(p) + or not _is_non_sentinel(q) + ): + return True + if not self.strict_annotation_matching: + # We will not overwrite clashing annotations, but the signature as a + # whole will be marked compatible so that holes can be filled in. 
+ return True + return p.annotation.deep_equals(q.annotation) # pyre-ignore[16] + + def match_posargs( + ps: Sequence[cst.Param], + qs: Sequence[cst.Param], + ) -> bool: + if len(ps) != len(qs): + return False + for p, q in zip(ps, qs): + if self.strict_posargs_matching and not p.name.value == q.name.value: + return False + if not compatible(p.annotation, q.annotation): + return False + return True + + def match_kwargs( + ps: Sequence[cst.Param], + qs: Sequence[cst.Param], + ) -> bool: + ps_dict = {x.name.value: x for x in ps} + qs_dict = {x.name.value: x for x in qs} + if set(ps_dict.keys()) != set(qs_dict.keys()): + return False + for k in ps_dict.keys(): + if not compatible(ps_dict[k].annotation, qs_dict[k].annotation): + return False + return True + + def match_star( + p: StarParamType, + q: StarParamType, + ) -> bool: + return _is_non_sentinel(p) == _is_non_sentinel(q) + + def match_params( + f: cst.FunctionDef, + g: FunctionAnnotation, + ) -> bool: + p, q = f.params, g.parameters + return ( + match_posargs(p.params, q.params) + and match_posargs(p.posonly_params, q.posonly_params) + and match_kwargs(p.kwonly_params, q.kwonly_params) + and match_star(p.star_arg, q.star_arg) + and match_star(p.star_kwarg, q.star_kwarg) + ) + + def match_return( + f: cst.FunctionDef, + g: FunctionAnnotation, + ) -> bool: + return compatible(f.returns, g.returns) + + return match_params(function, annotations) and match_return( + function, annotations + ) + + # transform API methods + + def visit_ClassDef( + self, + node: cst.ClassDef, + ) -> None: self.qualifier.append(node.name.value) - self.visited_classes.add(node.name.value) def leave_ClassDef( - self, original_node: cst.ClassDef, updated_node: cst.ClassDef + self, + original_node: cst.ClassDef, + updated_node: cst.ClassDef, ) -> cst.ClassDef: + self.visited_classes.add(original_node.name.value) + cls_name = ".".join(self.qualifier) self.qualifier.pop() + definition = self.annotations.class_definitions.get(cls_name) + if 
definition: + b1 = _find_generic_base(definition) + b2 = _find_generic_base(updated_node) + if b1 and not b2: + new_bases = list(updated_node.bases) + [b1] + self.annotation_counts.typevars_and_generics_added += 1 + return updated_node.with_changes(bases=new_bases) return updated_node - def visit_FunctionDef(self, node: cst.FunctionDef) -> bool: + def visit_FunctionDef( + self, + node: cst.FunctionDef, + ) -> bool: self.qualifier.append(node.name.value) # pyi files don't support inner functions, return False to stop the traversal. return False def leave_FunctionDef( - self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef + self, + original_node: cst.FunctionDef, + updated_node: cst.FunctionDef, ) -> cst.FunctionDef: - key = self._qualifier_name() + key = FunctionKey.make(self._qualifier_name(), updated_node.params) self.qualifier.pop() - if key in self.annotations.function_annotations: - function_annotation = self.annotations.function_annotations[key] - # Only add new annotation if explicitly told to overwrite existing - # annotations or if one doesn't already exist. 
- if self.overwrite_existing_annotations or not updated_node.returns: - updated_node = updated_node.with_changes( - returns=function_annotation.returns + if key in self.annotations.functions: + function_annotation = self.annotations.functions[key] + # Only add new annotation if: + # * we have matching function signatures and + # * we are explicitly told to overwrite existing annotations or + # * there is no existing annotation + if not self._match_signatures(updated_node, function_annotation): + return updated_node + set_return_annotation = ( + self.overwrite_existing_annotations or updated_node.returns is None + ) + if set_return_annotation and function_annotation.returns is not None: + updated_node = self._apply_annotation_to_return( + function_def=updated_node, + annotation=function_annotation.returns, ) # Don't override default values when annotating functions new_parameters = self._update_parameters(function_annotation, updated_node) return updated_node.with_changes(params=new_parameters) return updated_node + def visit_Assign( + self, + node: cst.Assign, + ) -> None: + self.current_assign = node + + @m.call_if_inside(m.Assign()) + @m.visit(m.Call(func=m.Name("TypeVar"))) + def record_typevar( + self, + node: cst.Call, + ) -> None: + # pyre-ignore current_assign is never None here + name = get_full_name_for_node(self.current_assign.targets[0].target) + if name is not None: + # Preserve the whole node, even though we currently just use the + # name, so that we can match bounds and variance at some point and + # determine if two typevars with the same name are indeed the same. 
+ + # pyre-ignore current_assign is never None here + self.typevars[name] = self.current_assign + self.current_assign = None + def leave_Assign( - self, original_node: cst.Assign, updated_node: cst.Assign + self, + original_node: cst.Assign, + updated_node: cst.Assign, ) -> Union[cst.Assign, cst.AnnAssign]: + self.current_assign = None if len(original_node.targets) > 1: for assign in original_node.targets: target = assign.target if isinstance(target, (cst.Name, cst.Attribute)): name = get_full_name_for_node(target) - if name is not None: + if name is not None and name != "_": # Add separate top-level annotations for `a = b = 1` # as `a: int` and `b: int`. self._add_to_toplevel_annotations(name) @@ -489,21 +1267,34 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): return self._annotate_single_target(original_node, updated_node) def leave_ImportFrom( - self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom + self, + original_node: cst.ImportFrom, + updated_node: cst.ImportFrom, ) -> cst.ImportFrom: self.import_statements.append(original_node) return updated_node def leave_Module( - self, original_node: cst.Module, updated_node: cst.Module + self, + original_node: cst.Module, + updated_node: cst.Module, ) -> cst.Module: fresh_class_definitions = [ definition for name, definition in self.annotations.class_definitions.items() if name not in self.visited_classes ] - if not self.toplevel_annotations and not fresh_class_definitions: + + # NOTE: The entire change will also be abandoned if + # self.annotation_counts is all 0s, so if adding any new category make + # sure to record it there. 
+ if not ( + self.toplevel_annotations + or fresh_class_definitions + or self.annotations.typevars + ): return updated_node + toplevel_statements = [] # First, find the insertion point for imports statements_before_imports, statements_after_imports = self._split_module( @@ -514,9 +1305,26 @@ class ApplyTypeAnnotationsVisitor(ContextAwareTransformer): statements_after_imports = self._insert_empty_line(statements_after_imports) for name, annotation in self.toplevel_annotations.items(): - annotated_assign = cst.AnnAssign(cst.Name(name), annotation, None) + annotated_assign = self._apply_annotation_to_attribute_or_global( + name=name, + annotation=annotation, + value=None, + ) toplevel_statements.append(cst.SimpleStatementLine([annotated_assign])) + # TypeVar definitions could be scattered through the file, so do not + # attempt to put new ones with existing ones, just add them at the top. + typevars = { + k: v for k, v in self.annotations.typevars.items() if k not in self.typevars + } + if typevars: + for var, stmt in typevars.items(): + toplevel_statements.append(cst.Newline()) + toplevel_statements.append(stmt) + self.annotation_counts.typevars_and_generics_added += 1 + toplevel_statements.append(cst.Newline()) + + self.annotation_counts.classes_added = len(fresh_class_definitions) toplevel_statements.extend(fresh_class_definitions) return updated_node.with_changes( diff --git a/libcst/codemod/visitors/_gather_comments.py b/libcst/codemod/visitors/_gather_comments.py index 5adcecf0..e499382b 100644 --- a/libcst/codemod/visitors/_gather_comments.py +++ b/libcst/codemod/visitors/_gather_comments.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/codemod/visitors/_gather_exports.py b/libcst/codemod/visitors/_gather_exports.py index 967f01e9..bb1c8894 100644 --- a/libcst/codemod/visitors/_gather_exports.py +++ b/libcst/codemod/visitors/_gather_exports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,6 +6,7 @@ from typing import Set, Union import libcst as cst +import libcst.matchers as m from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor from libcst.helpers import get_full_name_for_node @@ -53,6 +54,21 @@ class GatherExportsVisitor(ContextAwareVisitor): return True return False + def visit_AugAssign(self, node: cst.AugAssign) -> bool: + if m.matches( + node, + m.AugAssign( + target=m.Name("__all__"), + operator=m.AddAssign(), + value=m.List() | m.Tuple(), + ), + ): + value = node.value + if isinstance(value, (cst.List, cst.Tuple)): + self._is_assigned_export.add(value) + return True + return False + def visit_Assign(self, node: cst.Assign) -> bool: for target_node in node.targets: if self._handle_assign_target(target_node.target, node.value): @@ -124,6 +140,6 @@ class GatherExportsVisitor(ContextAwareVisitor): ) -> None: if self._in_assigned_export: name = node.evaluated_value - if name is None: + if not isinstance(name, str): return self.explicit_exported_objects.add(name) diff --git a/libcst/codemod/visitors/_gather_global_names.py b/libcst/codemod/visitors/_gather_global_names.py new file mode 100644 index 00000000..c4a5d57d --- /dev/null +++ b/libcst/codemod/visitors/_gather_global_names.py @@ -0,0 +1,75 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from typing import Set + +import libcst +from libcst.codemod._context import CodemodContext +from libcst.codemod._visitor import ContextAwareVisitor + + +class GatherGlobalNamesVisitor(ContextAwareVisitor): + """ + Gathers all globally accessible names defined in a module and stores them as + attributes on the instance. + Intended to be instantiated and passed to a :class:`~libcst.Module` + :meth:`~libcst.CSTNode.visit` method in order to gather up information about + names defined on a module. Note that this is not a substitute for scope + analysis or qualified name support. Please see :ref:`libcst-scope-tutorial` + for a more robust way of determining the qualified name and definition for + an arbitrary node. + Names that are globally accessible through imports are currently not included + but can be retrieved with GatherImportsVisitor. + + After visiting a module the following attributes will be populated: + + global_names + A sequence of strings representing global variables defined in the module + toplevel. + class_names + A sequence of strings representing classes defined in the module toplevel. + function_names + A sequence of strings representing functions defined in the module toplevel. 
+ + """ + + def __init__(self, context: CodemodContext) -> None: + super().__init__(context) + self.global_names: Set[str] = set() + self.class_names: Set[str] = set() + self.function_names: Set[str] = set() + # Track scope nesting + self.scope_depth: int = 0 + + def visit_ClassDef(self, node: libcst.ClassDef) -> None: + if self.scope_depth == 0: + self.class_names.add(node.name.value) + self.scope_depth += 1 + + def leave_ClassDef(self, original_node: libcst.ClassDef) -> None: + self.scope_depth -= 1 + + def visit_FunctionDef(self, node: libcst.FunctionDef) -> None: + if self.scope_depth == 0: + self.function_names.add(node.name.value) + self.scope_depth += 1 + + def leave_FunctionDef(self, original_node: libcst.FunctionDef) -> None: + self.scope_depth -= 1 + + def visit_Assign(self, node: libcst.Assign) -> None: + if self.scope_depth != 0: + return + for assign_target in node.targets: + target = assign_target.target + if isinstance(target, libcst.Name): + self.global_names.add(target.value) + + def visit_AnnAssign(self, node: libcst.AnnAssign) -> None: + if self.scope_depth != 0: + return + target = node.target + if isinstance(target, libcst.Name): + self.global_names.add(target.value) diff --git a/libcst/codemod/visitors/_gather_imports.py b/libcst/codemod/visitors/_gather_imports.py index 5920890f..6b187c53 100644 --- a/libcst/codemod/visitors/_gather_imports.py +++ b/libcst/codemod/visitors/_gather_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -8,10 +8,87 @@ from typing import Dict, List, Sequence, Set, Tuple, Union import libcst from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor -from libcst.helpers import get_absolute_module_for_import +from libcst.codemod.visitors._imports import ImportItem +from libcst.helpers import get_absolute_module_from_package_for_import -class GatherImportsVisitor(ContextAwareVisitor): +class _GatherImportsMixin(ContextAwareVisitor): + """ + A Mixin class for tracking visited imports. + """ + + def __init__(self, context: CodemodContext) -> None: + super().__init__(context) + # Track the available imports in this transform + self.module_imports: Set[str] = set() + self.object_mapping: Dict[str, Set[str]] = {} + # Track the aliased imports in this transform + self.module_aliases: Dict[str, str] = {} + self.alias_mapping: Dict[str, List[Tuple[str, str]]] = {} + # Track the import for every symbol introduced into the module + self.symbol_mapping: Dict[str, ImportItem] = {} + + def _handle_Import(self, node: libcst.Import) -> None: + for name in node.names: + alias = name.evaluated_alias + imp = ImportItem(name.evaluated_name, alias=alias) + if alias is not None: + # Track this as an aliased module + self.module_aliases[name.evaluated_name] = alias + self.symbol_mapping[alias] = imp + else: + # Get the module we're importing as a string. + self.module_imports.add(name.evaluated_name) + self.symbol_mapping[name.evaluated_name] = imp + + def _handle_ImportFrom(self, node: libcst.ImportFrom) -> None: + # Get the module we're importing as a string. + module = get_absolute_module_from_package_for_import( + self.context.full_package_name, node + ) + if module is None: + # Can't get the absolute import from relative, so we can't + # support this. 
+ return + nodenames = node.names + if isinstance(nodenames, libcst.ImportStar): + # We cover everything, no need to bother tracking other things + self.object_mapping[module] = set("*") + return + elif isinstance(nodenames, Sequence): + # Get the list of imports we're aliasing in this import + new_aliases = [ + (ia.evaluated_name, ia.evaluated_alias) + for ia in nodenames + if ia.asname is not None + ] + if new_aliases: + if module not in self.alias_mapping: + self.alias_mapping[module] = [] + # pyre-ignore We know that aliases are not None here. + self.alias_mapping[module].extend(new_aliases) + + # Get the list of imports we're importing in this import + new_objects = {ia.evaluated_name for ia in nodenames if ia.asname is None} + if new_objects: + if module not in self.object_mapping: + self.object_mapping[module] = set() + + # Make sure that we don't add to a '*' module + if "*" in self.object_mapping[module]: + self.object_mapping[module] = set("*") + return + + self.object_mapping[module].update(new_objects) + for ia in nodenames: + imp = ImportItem( + module, obj_name=ia.evaluated_name, alias=ia.evaluated_alias + ) + key = ia.evaluated_alias or ia.evaluated_name + self.symbol_mapping[key] = imp + + +class GatherImportsVisitor(_GatherImportsMixin): """ Gathers all imports in a module and stores them as attributes on the instance. 
Intended to be instantiated and passed to a :class:`~libcst.Module` @@ -52,65 +129,15 @@ class GatherImportsVisitor(ContextAwareVisitor): def __init__(self, context: CodemodContext) -> None: super().__init__(context) - # Track the available imports in this transform - self.module_imports: Set[str] = set() - self.object_mapping: Dict[str, Set[str]] = {} - # Track the aliased imports in this transform - self.module_aliases: Dict[str, str] = {} - self.alias_mapping: Dict[str, List[Tuple[str, str]]] = {} # Track all of the imports found in this transform self.all_imports: List[Union[libcst.Import, libcst.ImportFrom]] = [] def visit_Import(self, node: libcst.Import) -> None: # Track this import statement for later analysis. self.all_imports.append(node) - - for name in node.names: - alias = name.evaluated_alias - if alias is not None: - # Track this as an aliased module - self.module_aliases[name.evaluated_name] = alias - else: - # Get the module we're importing as a string. - self.module_imports.add(name.evaluated_name) + self._handle_Import(node) def visit_ImportFrom(self, node: libcst.ImportFrom) -> None: # Track this import statement for later analysis. self.all_imports.append(node) - - # Get the module we're importing as a string. - module = get_absolute_module_for_import(self.context.full_module_name, node) - if module is None: - # Can't get the absolute import from relative, so we can't - # support this. - return - nodenames = node.names - if isinstance(nodenames, libcst.ImportStar): - # We cover everything, no need to bother tracking other things - self.object_mapping[module] = set("*") - return - elif isinstance(nodenames, Sequence): - # Get the list of imports we're aliasing in this import - new_aliases = [ - (ia.evaluated_name, ia.evaluated_alias) - for ia in nodenames - if ia.asname is not None - ] - if new_aliases: - if module not in self.alias_mapping: - self.alias_mapping[module] = [] - # pyre-ignore We know that aliases are not None here. 
- self.alias_mapping[module].extend(new_aliases) - - # Get the list of imports we're importing in this import - new_objects = {ia.evaluated_name for ia in nodenames if ia.asname is None} - if new_objects: - if module not in self.object_mapping: - self.object_mapping[module] = set() - - # Make sure that we don't add to a '*' module - if "*" in self.object_mapping[module]: - self.object_mapping[module] = set("*") - return - - self.object_mapping[module].update(new_objects) + self._handle_ImportFrom(node) diff --git a/libcst/codemod/visitors/_gather_string_annotation_names.py b/libcst/codemod/visitors/_gather_string_annotation_names.py index c3d62445..b7268ffc 100644 --- a/libcst/codemod/visitors/_gather_string_annotation_names.py +++ b/libcst/codemod/visitors/_gather_string_annotation_names.py @@ -1,9 +1,9 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Collection, List, Set, Union, cast +from typing import cast, Collection, List, Set, Union import libcst as cst import libcst.matchers as m @@ -11,7 +11,6 @@ from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareVisitor from libcst.metadata import MetadataWrapper, QualifiedNameProvider - FUNCS_CONSIDERED_AS_STRING_ANNOTATIONS = {"typing.TypeVar"} @@ -45,6 +44,11 @@ class GatherNamesFromStringAnnotationsVisitor(ContextAwareVisitor): def leave_Annotation(self, original_node: cst.Annotation) -> None: self._annotation_stack.pop() + def visit_Subscript(self, node: cst.Subscript) -> bool: + qnames = self.get_metadata(QualifiedNameProvider, node) + # A Literal["foo"] should not be interpreted as a use of the symbol "foo". 
+ return not any(qn.name == "typing.Literal" for qn in qnames) + def visit_Call(self, node: cst.Call) -> bool: qnames = self.get_metadata(QualifiedNameProvider, node) if any(qn.name in self._typing_functions for qn in qnames): @@ -72,7 +76,11 @@ class GatherNamesFromStringAnnotationsVisitor(ContextAwareVisitor): value = node.evaluated_value if value is None: return - mod = cst.parse_module(value) + try: + mod = cst.parse_module(value) + except cst.ParserSyntaxError: + # Not all strings inside a type annotation are meant to be valid Python code. + return extracted_nodes = m.extractall( mod, m.Name( diff --git a/libcst/codemod/visitors/_gather_unused_imports.py b/libcst/codemod/visitors/_gather_unused_imports.py index 82860325..01243cae 100644 --- a/libcst/codemod/visitors/_gather_unused_imports.py +++ b/libcst/codemod/visitors/_gather_unused_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -17,7 +17,6 @@ from libcst.codemod.visitors._gather_string_annotation_names import ( from libcst.metadata import ProviderT, ScopeProvider from libcst.metadata.scope_provider import _gen_dotted_names - MODULES_IGNORED_BY_DEFAULT = {"__future__"} @@ -35,6 +34,10 @@ class GatherUnusedImportsVisitor(ContextAwareVisitor): parent import node. """ + # pyre-fixme[8]: Attribute has type + # `Tuple[typing.Type[cst.metadata.base_provider.BaseMetadataProvider[object]]]`; + # used as `Tuple[typing.Type[cst.metadata.name_provider.QualifiedNameProvider], + # typing.Type[cst.metadata.scope_provider.ScopeProvider]]`. METADATA_DEPENDENCIES: Tuple[ProviderT] = ( *GatherNamesFromStringAnnotationsVisitor.METADATA_DEPENDENCIES, ScopeProvider, @@ -105,7 +108,7 @@ class GatherUnusedImportsVisitor(ContextAwareVisitor): Override this in a subclass for additional filtering. 
""" unused_imports = set() - for (alias, parent) in candidates: + for alias, parent in candidates: scope = self.get_metadata(ScopeProvider, parent) if scope is None: continue @@ -135,8 +138,7 @@ class GatherUnusedImportsVisitor(ContextAwareVisitor): for assignment in scope[name_or_alias]: if ( - isinstance(assignment, cst.metadata.Assignment) - and isinstance(assignment.node, (cst.ImportFrom, cst.Import)) + isinstance(assignment, cst.metadata.ImportAssignment) and len(assignment.references) > 0 ): return True diff --git a/libcst/codemod/visitors/_imports.py b/libcst/codemod/visitors/_imports.py new file mode 100644 index 00000000..f3c1c305 --- /dev/null +++ b/libcst/codemod/visitors/_imports.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from dataclasses import dataclass, replace +from typing import Optional + +from libcst.helpers import get_absolute_module_from_package + + +@dataclass(frozen=True) +class ImportItem: + """Representation of individual import items for codemods.""" + + module_name: str + obj_name: Optional[str] = None + alias: Optional[str] = None + relative: int = 0 + + def __post_init__(self) -> None: + if self.module_name is None: + object.__setattr__(self, "module_name", "") + elif self.module_name.startswith("."): + mod = self.module_name.lstrip(".") + rel = self.relative + len(self.module_name) - len(mod) + object.__setattr__(self, "module_name", mod) + object.__setattr__(self, "relative", rel) + + @property + def module(self) -> str: + return "." * self.relative + self.module_name + + def resolve_relative(self, package_name: Optional[str]) -> "ImportItem": + """Return an ImportItem with an absolute module name if possible.""" + mod = self + # `import ..a` -> `from .. 
import a` + if mod.relative and mod.obj_name is None: + mod = replace(mod, module_name="", obj_name=mod.module_name) + if package_name is None: + return mod + m = get_absolute_module_from_package( + package_name, mod.module_name or None, self.relative + ) + return mod if m is None else replace(mod, module_name=m, relative=0) diff --git a/libcst/codemod/visitors/_remove_imports.py b/libcst/codemod/visitors/_remove_imports.py index 9d3b6902..b625ee60 100644 --- a/libcst/codemod/visitors/_remove_imports.py +++ b/libcst/codemod/visitors/_remove_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,10 +6,14 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union import libcst as cst +from libcst import CSTLogicError from libcst.codemod._context import CodemodContext from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor from libcst.codemod.visitors._gather_unused_imports import GatherUnusedImportsVisitor -from libcst.helpers import get_absolute_module_for_import, get_full_name_for_node +from libcst.helpers import ( + get_absolute_module_from_package_for_import, + get_full_name_for_node, +) from libcst.metadata import Assignment, ProviderT, ScopeProvider @@ -38,11 +42,11 @@ class RemovedNodeVisitor(ContextAwareVisitor): # We don't handle removing this, so ignore it. 
return - module_name = get_absolute_module_for_import( - self.context.full_module_name, import_node + module_name = get_absolute_module_from_package_for_import( + self.context.full_package_name, import_node ) if module_name is None: - raise Exception("Cannot look up absolute module from relative import!") + raise ValueError("Cannot look up absolute module from relative import!") # We know any local names will refer to this as an alias if # there is one, and as the original name if there is not one @@ -69,7 +73,9 @@ class RemovedNodeVisitor(ContextAwareVisitor): # Look up the scope for this node, remove the import that caused it to exist. metadata_wrapper = self.context.wrapper if metadata_wrapper is None: - raise Exception("Cannot look up import, metadata is not computed for node!") + raise ValueError( + "Cannot look up import, metadata is not computed for node!" + ) scope_provider = metadata_wrapper.resolve(ScopeProvider) try: scope = scope_provider[node] @@ -182,7 +188,7 @@ class RemoveImportsVisitor(ContextAwareTransformer): ) -> List[Tuple[str, Optional[str], Optional[str]]]: unused_imports = context.scratch.get(RemoveImportsVisitor.CONTEXT_KEY, []) if not isinstance(unused_imports, list): - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") return unused_imports @staticmethod @@ -248,9 +254,11 @@ class RemoveImportsVisitor(ContextAwareTransformer): if isinstance(names, cst.ImportStar): # We don't handle removing this, so ignore it. 
return - module_name = get_absolute_module_for_import(context.full_module_name, node) + module_name = get_absolute_module_from_package_for_import( + context.full_package_name, node + ) if module_name is None: - raise Exception("Cannot look up absolute module from relative import!") + raise ValueError("Cannot look up absolute module from relative import!") for import_alias in names: RemoveImportsVisitor.remove_unused_import( context, @@ -413,8 +421,8 @@ class RemoveImportsVisitor(ContextAwareTransformer): return updated_node # Make sure we actually know the absolute module. - module_name = get_absolute_module_for_import( - self.context.full_module_name, updated_node + module_name = get_absolute_module_from_package_for_import( + self.context.full_package_name, updated_node ) if module_name is None or module_name not in self.unused_obj_imports: # This node isn't on our list of todos, so let's bail. diff --git a/libcst/codemod/visitors/tests/__init__.py b/libcst/codemod/visitors/tests/__init__.py index 602d2685..aac70d45 100644 --- a/libcst/codemod/visitors/tests/__init__.py +++ b/libcst/codemod/visitors/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/visitors/tests/test_add_imports.py b/libcst/codemod/visitors/tests/test_add_imports.py index 57060269..613da907 100644 --- a/libcst/codemod/visitors/tests/test_add_imports.py +++ b/libcst/codemod/visitors/tests/test_add_imports.py @@ -1,14 +1,13 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# from libcst.codemod import CodemodContext, CodemodTest -from libcst.codemod.visitors import AddImportsVisitor +from libcst.codemod.visitors import AddImportsVisitor, ImportItem class TestAddImportsCodemod(CodemodTest): - TRANSFORM = AddImportsVisitor def test_noop(self) -> None: @@ -55,7 +54,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", None, None)]) + self.assertCodemod(before, after, [ImportItem("a.b.c", None, None)]) def test_dont_add_module_simple(self) -> None: """ @@ -81,7 +80,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", None, None)]) + self.assertCodemod(before, after, [ImportItem("a.b.c", None, None)]) def test_add_module_alias_simple(self) -> None: """ @@ -105,7 +104,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", None, "d")]) + self.assertCodemod(before, after, [ImportItem("a.b.c", None, "d")]) def test_dont_add_module_alias_simple(self) -> None: """ @@ -131,7 +130,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", None, "d")]) + self.assertCodemod(before, after, [ImportItem("a.b.c", None, "d")]) def test_add_module_complex(self) -> None: """ @@ -167,11 +166,11 @@ class TestAddImportsCodemod(CodemodTest): before, after, [ - ("a.b.c", None, None), - ("defg.hi", None, None), - ("argparse", None, None), - ("jkl", None, "h"), - ("i.j", None, "k"), + ImportItem("a.b.c", None, None), + ImportItem("defg.hi", None, None), + ImportItem("argparse", None, None), + ImportItem("jkl", None, "h"), + ImportItem("i.j", None, "k"), ], ) @@ -197,7 +196,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", None)]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_add_object_alias_simple(self) -> None: """ @@ -221,7 +220,7 @@ class 
TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", "E")]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", "E")]) def test_add_future(self) -> None: """ @@ -250,7 +249,9 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("__future__", "dummy_feature", None)]) + self.assertCodemod( + before, after, [ImportItem("__future__", "dummy_feature", None)] + ) def test_dont_add_object_simple(self) -> None: """ @@ -276,7 +277,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", None)]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_dont_add_object_alias_simple(self) -> None: """ @@ -302,7 +303,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", "E")]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", "E")]) def test_add_object_modify_simple(self) -> None: """ @@ -328,7 +329,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", None)]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_add_object_alias_modify_simple(self) -> None: """ @@ -354,7 +355,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", "_")]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", "_")]) def test_add_object_modify_complex(self) -> None: """ @@ -387,17 +388,17 @@ class TestAddImportsCodemod(CodemodTest): before, after, [ - ("a.b.c", "D", None), - ("a.b.c", "F", None), - ("a.b.c", "G", "H"), - ("d.e.f", "Foo", None), - ("g.h.i", "Z", None), - ("g.h.i", "X", None), - ("d.e.f", "Bar", None), - ("d.e.f", "Baz", "Qux"), - ("g.h.i", "Y", None), - ("g.h.i", "V", "W"), - ("a.b.c", "F", None), + ImportItem("a.b.c", "D", None), + ImportItem("a.b.c", "F", None), + ImportItem("a.b.c", "G", 
"H"), + ImportItem("d.e.f", "Foo", None), + ImportItem("g.h.i", "Z", None), + ImportItem("g.h.i", "X", None), + ImportItem("d.e.f", "Bar", None), + ImportItem("d.e.f", "Baz", "Qux"), + ImportItem("g.h.i", "Y", None), + ImportItem("g.h.i", "V", "W"), + ImportItem("a.b.c", "F", None), ], ) @@ -440,18 +441,18 @@ class TestAddImportsCodemod(CodemodTest): before, after, [ - ("a.b.c", "D", None), - ("a.b.c", "F", None), - ("d.e.f", "Foo", None), - ("sys", None, None), - ("g.h.i", "Z", None), - ("g.h.i", "X", None), - ("d.e.f", "Bar", None), - ("g.h.i", "Y", None), - ("foo", None, None), - ("a.b.c", "F", None), - ("bar", None, "baz"), - ("qux", None, "quux"), + ImportItem("a.b.c", "D", None), + ImportItem("a.b.c", "F", None), + ImportItem("d.e.f", "Foo", None), + ImportItem("sys", None, None), + ImportItem("g.h.i", "Z", None), + ImportItem("g.h.i", "X", None), + ImportItem("d.e.f", "Bar", None), + ImportItem("g.h.i", "Y", None), + ImportItem("foo", None, None), + ImportItem("a.b.c", "F", None), + ImportItem("bar", None, "baz"), + ImportItem("qux", None, "quux"), ], ) @@ -481,7 +482,7 @@ class TestAddImportsCodemod(CodemodTest): return 5 """ - self.assertCodemod(before, after, [("a.b.c", "D", None)]) + self.assertCodemod(before, after, [ImportItem("a.b.c", "D", None)]) def test_add_import_preserve_doctring_multiples(self) -> None: """ @@ -511,7 +512,9 @@ class TestAddImportsCodemod(CodemodTest): """ self.assertCodemod( - before, after, [("a.b.c", "D", None), ("argparse", None, None)] + before, + after, + [ImportItem("a.b.c", "D", None), ImportItem("argparse", None, None)], ) def test_strict_module_no_imports(self) -> None: @@ -532,7 +535,7 @@ class TestAddImportsCodemod(CodemodTest): pass """ - self.assertCodemod(before, after, [("argparse", None, None)]) + self.assertCodemod(before, after, [ImportItem("argparse", None, None)]) def test_strict_module_with_imports(self) -> None: """ @@ -556,7 +559,7 @@ class TestAddImportsCodemod(CodemodTest): pass """ - 
self.assertCodemod(before, after, [("argparse", None, None)]) + self.assertCodemod(before, after, [ImportItem("argparse", None, None)]) def test_dont_add_relative_object_simple(self) -> None: """ @@ -585,8 +588,10 @@ class TestAddImportsCodemod(CodemodTest): self.assertCodemod( before, after, - [("a.b.c", "D", None)], - context_override=CodemodContext(full_module_name="a.b.foobar"), + [ImportItem("a.b.c", "D", None)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), ) def test_add_object_relative_modify_simple(self) -> None: @@ -616,8 +621,10 @@ class TestAddImportsCodemod(CodemodTest): self.assertCodemod( before, after, - [("a.b.c", "D", None)], - context_override=CodemodContext(full_module_name="a.b.foobar"), + [ImportItem("a.b.c", "D", None)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), ) def test_import_order(self) -> None: @@ -634,8 +641,231 @@ class TestAddImportsCodemod(CodemodTest): self.assertCodemod( before, after, - [("a", "f", None), ("a", "g", "y"), ("a", "c", None), ("a", "d", "x")], - context_override=CodemodContext(full_module_name="a.b.foobar"), + [ + ImportItem("a", "f", None), + ImportItem("a", "g", "y"), + ImportItem("a", "c", None), + ImportItem("a", "d", "x"), + ], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), + ) + + def test_add_explicit_relative(self) -> None: + """ + Should add a relative import from .. . + """ + + before = """ + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from .. import a + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("a", None, None, 2)], + ) + + def test_add_explicit_relative_alias(self) -> None: + """ + Should add a relative import from .. . + """ + + before = """ + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from .. 
import a as foo + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("a", None, "foo", 2)], + ) + + def test_add_explicit_relative_object_simple(self) -> None: + """ + Should add a relative import. + """ + + before = """ + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from ..a import B + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("a", "B", None, 2)], + ) + + def test_dont_add_explicit_relative_object_simple(self) -> None: + """ + Should not add object as an import since it exists. + """ + + before = """ + from ..c import D + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from ..c import D + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("c", "D", None, 2)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), + ) + + def test_add_object_explicit_relative_modify_simple(self) -> None: + """ + Should modify existing import to add new object. + """ + + before = """ + from ..c import E, F + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from ..c import D, E, F + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("c", "D", None, 2)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), + ) + + def test_add_object_resolve_explicit_relative_modify_simple(self) -> None: + """ + Should merge a relative new module with an absolute existing one. 
+ """ + + before = """ + from ..c import E, F + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from ..c import D, E, F + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("c", "D", None, 2)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), + ) + + def test_add_object_resolve_dotted_relative_modify_simple(self) -> None: + """ + Should merge a relative new module with an absolute existing one. + """ + + before = """ + from ..c import E, F + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + after = """ + from ..c import D, E, F + + def foo() -> None: + pass + + def bar() -> int: + return 5 + """ + + self.assertCodemod( + before, + after, + [ImportItem("..c", "D", None)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), ) def test_import_in_docstring_module(self) -> None: @@ -655,6 +885,144 @@ class TestAddImportsCodemod(CodemodTest): self.assertCodemod( before, after, - [("__future__", "annotations", None)], - context_override=CodemodContext(full_module_name="a.b.foobar"), + [ImportItem("__future__", "annotations", None)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), + ) + + def test_import_in_module_with_standalone_string_not_a_docstring( + self, + ) -> None: + """ + The import should be added after the __future__ imports. 
+ """ + before = """ + from __future__ import annotations + from __future__ import division + + '''docstring.''' + def func(): + pass + """ + after = """ + from __future__ import annotations + from __future__ import division + import typing + + '''docstring.''' + def func(): + pass + """ + + self.assertCodemod( + before, + after, + [ImportItem("typing", None, None)], + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), + ) + + def test_add_at_first_block(self) -> None: + """ + Should add the import only at the end of the first import block. + """ + + before = """ + import a + import b + + e() + + import c + import d + """ + + after = """ + import a + import b + import e + + e() + + import c + import d + """ + + self.assertCodemod(before, after, [ImportItem("e", None, None)]) + + def test_add_no_import_block_before_statement(self) -> None: + """ + Should add the import before the call. + """ + + before = """ + '''docstring''' + e() + import a + import b + """ + + after = """ + '''docstring''' + import c + + e() + import a + import b + """ + + self.assertCodemod(before, after, [ImportItem("c", None, None)]) + + def test_do_not_add_existing(self) -> None: + """ + Should not add the new object import at existing import since it's not at the top + """ + + before = """ + '''docstring''' + e() + import a + import b + from c import f + """ + + after = """ + '''docstring''' + from c import e + + e() + import a + import b + from c import f + """ + + self.assertCodemod(before, after, [ImportItem("c", "e", None)]) + + def test_add_existing_at_top(self) -> None: + """ + Should add new import at exisitng from import at top + """ + + before = """ + '''docstring''' + from c import d + e() + import a + import b + from c import f + """ + + after = """ + '''docstring''' + from c import e, x, d + e() + import a + import b + from c import f + """ + + self.assertCodemod( + before, after, [ImportItem("c", "x", None), ImportItem("c", "e", None)] 
) diff --git a/libcst/codemod/visitors/tests/test_apply_type_annotations.py b/libcst/codemod/visitors/tests/test_apply_type_annotations.py index 90e7b58c..e7b25124 100644 --- a/libcst/codemod/visitors/tests/test_apply_type_annotations.py +++ b/libcst/codemod/visitors/tests/test_apply_type_annotations.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016-present, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -11,16 +11,202 @@ from typing import Type from libcst import parse_module from libcst.codemod import Codemod, CodemodContext, CodemodTest -from libcst.codemod.visitors._apply_type_annotations import ApplyTypeAnnotationsVisitor +from libcst.codemod.visitors._apply_type_annotations import ( + AnnotationCounts, + ApplyTypeAnnotationsVisitor, +) from libcst.testing.utils import data_provider class TestApplyAnnotationsVisitor(CodemodTest): TRANSFORM: Type[Codemod] = ApplyTypeAnnotationsVisitor + def run_simple_test_case( + self, + stub: str, + before: str, + after: str, + ) -> None: + context = CodemodContext() + ApplyTypeAnnotationsVisitor.store_stub_in_context( + context, parse_module(textwrap.dedent(stub.rstrip())) + ) + self.assertCodemod(before, after, context_override=context) + + def run_test_case_with_flags( + self, + stub: str, + before: str, + after: str, + **kwargs: bool, + ) -> None: + context = CodemodContext() + ApplyTypeAnnotationsVisitor.store_stub_in_context( + context, parse_module(textwrap.dedent(stub.rstrip())) + ) + # Test setting the flag on the codemod instance. + # pyre-fixme[6]: Expected `Optional[typing.Sequence[str]]` for 4th param but + # got `Dict[str, bool]`. + # pyre-fixme[6]: Expected `Optional[str]` for 4th param but got `Dict[str, + # bool]`. + # pyre-fixme[6]: Expected `bool` for 4th param but got `Dict[str, bool]`. 
+ self.assertCodemod(before, after, context_override=context, **kwargs) + + # Test setting the flag when storing the stub in the context. + context = CodemodContext() + ApplyTypeAnnotationsVisitor.store_stub_in_context( + context, + parse_module(textwrap.dedent(stub.rstrip())), + **kwargs, + ) + self.assertCodemod(before, after, context_override=context) + + def run_test_case_twice( + self, + stub: str, + before: str, + after: str, + ) -> None: + context = CodemodContext() + ApplyTypeAnnotationsVisitor.store_stub_in_context( + context, parse_module(textwrap.dedent(stub.rstrip())) + ) + r1 = ApplyTypeAnnotationsVisitor(context).transform_module( + parse_module(textwrap.dedent(before.rstrip())) + ) + + context = CodemodContext() + ApplyTypeAnnotationsVisitor.store_stub_in_context( + context, parse_module(textwrap.dedent(stub.rstrip())) + ) + r2 = ApplyTypeAnnotationsVisitor(context).transform_module(r1) + assert r1.code == textwrap.dedent(after.rstrip()) + assert r2.code == textwrap.dedent(after.rstrip()) + @data_provider( - ( - ( + { + "simple": ( + """ + bar: int = ... + """, + """ + bar = foo() + """, + """ + bar: int = foo() + """, + ), + "simple_with_existing": ( + """ + bar: int = ... + """, + """ + bar: str = foo() + """, + """ + bar: str = foo() + """, + ), + "with_separate_declaration": ( + """ + x: int = ... + y: int = ... + z: int = ... + """, + """ + x = y = z = 1 + """, + """ + x: int + y: int + z: int + + x = y = z = 1 + """, + ), + "needs_added_import": ( + """ + FOO: a.b.Example = ... + """, + """ + FOO = bar() + """, + """ + from a.b import Example + + FOO: Example = bar() + """, + ), + "with_generic": ( + """ + FOO: Union[a.b.Example, int] = ... + """, + """ + FOO = bar() + """, + """ + from a.b import Example + + FOO: Union[Example, int] = bar() + """, + ), + "with_relative_imports": ( + """ + from .relative0 import T0 + from ..relative1 import T1 + from . 
import relative2 + + x0: typing.Optional[T0] + x1: typing.Optional[T1] + x2: typing.Optional[relative2.T2] + """, + """ + x0 = None + x1 = None + x2 = None + """, + """ + from ..relative1 import T1 + from .relative0 import T0 + from .relative2 import T2 + from typing import Optional + + x0: Optional[T0] = None + x1: Optional[T1] = None + x2: Optional[T2] = None + """, + ), + "splitting_multi_assigns": ( + """ + a: str = ... + x: int = ... + y: int = ... + _: str = ... + z: str = ... + """, + """ + a = 'a' + x, y = 1, 2 + _, z = 'hello world'.split() + """, + """ + x: int + y: int + z: str + + a: str = 'a' + x, y = 1, 2 + _, z = 'hello world'.split() + """, + ), + } + ) + def test_annotate_globals(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) + + @data_provider( + { + "basic_return": ( """ def foo() -> int: ... """, @@ -33,7 +219,33 @@ class TestApplyAnnotationsVisitor(CodemodTest): return 1 """, ), - ( + "return_with_existing_param": ( + """ + def foo(x: int) -> str: ... + """, + """ + def foo(x: str): + pass + """, + """ + def foo(x: str) -> str: + pass + """, + ), + "param_with_existng_return": ( + """ + def foo(x: int) -> int: ... + """, + """ + def foo(x) -> int: + return x + """, + """ + def foo(x: int) -> int: + return x + """, + ), + "return_and_params_general": ( """ def foo( b: str, c: int = ..., *, d: str = ..., e: int, f: int = ... @@ -52,7 +264,22 @@ class TestApplyAnnotationsVisitor(CodemodTest): return 1 """, ), - ( + "with_import__basic": ( + """ + def foo() -> bar.Baz: ... + """, + """ + def foo(): + return returns_baz() + """, + """ + from bar import Baz + + def foo() -> Baz: + return returns_baz() + """, + ), + "with_import__unneeded_explicit": ( """ import bar @@ -70,10 +297,8 @@ class TestApplyAnnotationsVisitor(CodemodTest): """, ), # Keep the existing `import A` instead of using `from A import B`. 
- ( + "with_import__preexisting": ( """ - import bar - def foo() -> bar.Baz: ... """, """ @@ -89,145 +314,84 @@ class TestApplyAnnotationsVisitor(CodemodTest): return returns_baz() """, ), - ( + "with_as_import": ( """ - def foo() -> int: ... + from bar import A as B - class A: - def foo() -> str: ... + def foo(x: B): ... """, """ - def foo(): - return 1 - class A: - def foo(): - return '' + def foo(x): + pass """, """ - def foo() -> int: - return 1 - class A: - def foo() -> str: - return '' + from bar import A as B + + def foo(x: B): + pass """, ), - ( + "with_conflicting_imported_symbols": ( """ - bar: int = ... + import a.foo as bar + from b.c import Baz as B + import d + + def f(a: d.A, b: B) -> bar.B: ... """, """ - bar = foo() + def f(a, b): + pass """, """ - bar: int = foo() + import a.foo as bar + from b.c import Baz as B + from d import A + + def f(a: A, b: B) -> bar.B: + pass """, ), - ( + "with_conflicts_between_imported_and_existing_symbols": ( """ - bar: int = ... + from a import A + from b import B + + def f(x: A, y: B) -> None: ... """, """ - bar: str = foo() + from b import A, B + + def f(x, y): + y = A(x) + z = B(y) """, """ - bar: str = foo() + from b import A, B + import a + + def f(x: a.A, y: B) -> None: + y = A(x) + z = B(y) """, ), - ( + "with_nested_import": ( """ - bar: int = ... - class A: - bar: str = ... + def foo(x: django.http.response.HttpResponse) -> str: + ... """, """ - bar = foo() - class A: - bar = foobar() + def foo(x) -> str: + pass """, """ - bar: int = foo() - class A: - bar: str = foobar() + from django.http.response import HttpResponse + + def foo(x: HttpResponse) -> str: + pass """, ), - ( - """ - bar: int = ... - class A: - bar: str = ... - """, - """ - bar = foo() - class A: - bar = foobar() - """, - """ - bar: int = foo() - class A: - bar: str = foobar() - """, - ), - ( - """ - a: int = ... - b: str = ... 
- """, - """ - def foo() -> Tuple[int, str]: - return (1, "") - - a, b = foo() - """, - """ - a: int - b: str - - def foo() -> Tuple[int, str]: - return (1, "") - - a, b = foo() - """, - ), - ( - """ - a: int = ... - b: str = ... - """, - """ - def foo() -> Tuple[int, str]: - return (1, "") - - [a, b] = foo() - """, - """ - a: int - b: str - - def foo() -> Tuple[int, str]: - return (1, "") - - [a, b] = foo() - """, - ), - ( - """ - x: int = ... - y: int = ... - z: int = ... - """, - """ - x = y = z = 1 - """, - """ - x: int - y: int - z: int - - x = y = z = 1 - """, - ), - # Don't add annotations if one is already present - ( + "no_override_existing": ( """ def foo(x: int = 1) -> List[str]: ... """, @@ -244,7 +408,7 @@ class TestApplyAnnotationsVisitor(CodemodTest): return [''] """, ), - ( + "with_typing_import__basic": ( """ from typing import List @@ -261,7 +425,7 @@ class TestApplyAnnotationsVisitor(CodemodTest): return [1] """, ), - ( + "with_typing_import__add_to_preexisting_line": ( """ from typing import List @@ -280,139 +444,7 @@ class TestApplyAnnotationsVisitor(CodemodTest): return [1] """, ), - ( - """ - a: Dict[str, int] = ... - """, - """ - def foo() -> int: - return 1 - a = {} - a['x'] = foo() - """, - """ - def foo() -> int: - return 1 - a: Dict[str, int] = {} - a['x'] = foo() - """, - ), - # Test that tuples with subscripts are handled correctly - # and top level annotations are added in the correct place - ( - """ - a: int = ... - """, - """ - from typing import Tuple - - def foo() -> Tuple[str, int]: - return "", 1 - - b['z'], a = foo() - """, - """ - from typing import Tuple - a: int - - def foo() -> Tuple[str, int]: - return "", 1 - - b['z'], a = foo() - """, - ), - # Don't override existing default parameter values - ( - """ - class B: - def foo(self, x: int = a.b.A.__add__(1), y=None) -> int: ... 
- """, - """ - class B: - def foo(self, x = A + 1, y = None) -> int: - return x - - """, - """ - class B: - def foo(self, x: int = A + 1, y = None) -> int: - return x - """, - ), - ( - """ - def foo(x: int) -> int: ... - """, - """ - def foo(x) -> int: - return x - """, - """ - def foo(x: int) -> int: - return x - """, - ), - ( - """ - async def a(r: Request, z=None) -> django.http.response.HttpResponse: ... - async def b(r: Request, z=None) -> django.http.response.HttpResponse: ... - async def c(r: Request, z=None) -> django.http.response.HttpResponse: ... - """, - """ - async def a(r: Request, z=None): ... - async def b(r: Request, z=None): ... - async def c(r: Request, z=None): ... - """, - """ - from django.http.response import HttpResponse - - async def a(r: Request, z=None) -> HttpResponse: ... - async def b(r: Request, z=None) -> HttpResponse: ... - async def c(r: Request, z=None) -> HttpResponse: ... - """, - ), - ( - """ - FOO: a.b.Example = ... - """, - """ - FOO = bar() - """, - """ - from a.b import Example - - FOO: Example = bar() - """, - ), - ( - """ - FOO: Union[a.b.Example, int] = ... - """, - """ - FOO = bar() - """, - """ - from a.b import Example - - FOO: Union[Example, int] = bar() - """, - ), - ( - """ - def foo(x: int) -> List[Union[a.b.Example, str]]: ... - """, - """ - def foo(x: int): - return [barfoo(), ""] - """, - """ - from a.b import Example - - def foo(x: int) -> List[Union[Example, str]]: - return [barfoo(), ""] - """, - ), - ( + "add_imports_for_nested_types": ( """ def foo(x: int) -> Optional[a.b.Example]: ... """, @@ -427,22 +459,39 @@ class TestApplyAnnotationsVisitor(CodemodTest): pass """, ), - ( + "add_imports_for_generics": ( """ - def foo(x: int) -> str: ... + def foo(x: int) -> typing.Optional[Example]: ... 
""", """ - def foo(x: str): + def foo(x: int): pass """, """ - def foo(x: str) -> str: + from typing import Optional + + def foo(x: int) -> Optional[Example]: pass """, ), - ( + "add_imports_for_doubly_nested_types": ( """ - def foo(x: int)-> Union[ + def foo(x: int) -> List[Union[a.b.Example, str]]: ... + """, + """ + def foo(x: int): + return [barfoo(), ""] + """, + """ + from a.b import Example + + def foo(x: int) -> List[Union[Example, str]]: + return [barfoo(), ""] + """, + ), + "deeply_nested_example_with_multiline_annotation": ( + """ + def foo(x: int) -> Union[ Coroutine[Any, Any, django.http.response.HttpResponse], str ]: ... @@ -460,41 +509,7 @@ class TestApplyAnnotationsVisitor(CodemodTest): pass """, ), - ( - """ - def foo(x: django.http.response.HttpResponse) -> str: - pass - """, - """ - def foo(x) -> str: - pass - """, - """ - from django.http.response import HttpResponse - - def foo(x: HttpResponse) -> str: - pass - """, - ), - ( - """ - def foo() -> b.b.A: ... - """, - """ - from c import A as B, bar - - def foo(): - return bar() - """, - """ - from c import A as B, bar - from b.b import A - - def foo() -> A: - return bar() - """, - ), - ( + "do_not_add_imports_inside_of_Type": ( """ from typing import Type @@ -516,7 +531,98 @@ class TestApplyAnnotationsVisitor(CodemodTest): return A """, ), - ( + # The following two tests verify that we can annotate functions + # with async and decorator information, regardless of whether this + # is part of the stub file. + "async_with_decorators__full_stub": ( + """ + @second_decorator + @first_decorator(5) + async def async_with_decorators(r: Request, b: bool) -> django.http.response.HttpResponse: ... 
+ """, + """ + @second_decorator + @first_decorator(5) + async def async_with_decorators(r, b): + return respond(r, b) + """, + """ + from django.http.response import HttpResponse + + @second_decorator + @first_decorator(5) + async def async_with_decorators(r: Request, b: bool) -> HttpResponse: + return respond(r, b) + """, + ), + "async_with_decorators__bare_stub": ( + """ + def async_with_decorators(r: Request, b: bool) -> django.http.response.HttpResponse: ... + """, + """ + @second_decorator + @first_decorator(5) + async def async_with_decorators(r, b): + return respond(r, b) + """, + """ + from django.http.response import HttpResponse + + @second_decorator + @first_decorator(5) + async def async_with_decorators(r: Request, b: bool) -> HttpResponse: + return respond(r, b) + """, + ), + "with_variadic_arguments": ( + """ + def incomplete_stubs_with_stars( + x: int, + *args, + **kwargs, + ) -> None: ... + """, + """ + def incomplete_stubs_with_stars( + x, + *args: P.args, + **kwargs: P.kwargs, + ): + pass + """, + """ + def incomplete_stubs_with_stars( + x: int, + *args: P.args, + **kwargs: P.kwargs, + ) -> None: + pass + """, + ), + # test cases named with the REQUIRES_PREEXISTING prefix are verifying + # that certain special cases work if the stub and the existing code + # happen to align well, but none of these cases are guaranteed to work + # in general - for example duplicate type names will generally result in + # incorrect codemod. + "REQURIES_PREEXISTING_new_import_okay_if_existing_aliased": ( + """ + def foo() -> b.b.A: ... + """, + """ + from c import A as B, bar + + def foo(): + return bar() + """, + """ + from c import A as B, bar + from b.b import A + + def foo() -> A: + return bar() + """, + ), + "REQUIRES_PREEXISTING_fully_qualified_with_alias": ( """ def foo() -> db.Connection: ... 
""", @@ -531,7 +637,7 @@ class TestApplyAnnotationsVisitor(CodemodTest): return db.Connection() """, ), - ( + "REQURIRES_PREEXISTING_fully_qualified_typing": ( """ def foo() -> typing.Sequence[int]: ... """, @@ -546,85 +652,114 @@ class TestApplyAnnotationsVisitor(CodemodTest): return [] """, ), - # Insert a TypedDict class that is not in the source file. - ( - """ - from mypy_extensions import TypedDict + } + ) + def test_annotate_simple_functions( + self, stub: str, before: str, after: str + ) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) - class MovieTypedDict(TypedDict): - name: str - year: int + @data_provider( + { + "respect_default_values_1": ( + """ + class B: + def foo(self, x: int = a.b.A.__add__(1), y=None) -> int: ... """, """ - def foo() -> None: - pass + class B: + def foo(self, x = A + 1, y = None) -> int: + return x + """, """ - from mypy_extensions import TypedDict - - class MovieTypedDict(TypedDict): - name: str - year: int - - def foo() -> None: - pass + class B: + def foo(self, x: int = A + 1, y = None) -> int: + return x """, ), - # Insert only the TypedDict class that is not in the source file. - ( + "respect_default_values_2": ( """ - from mypy_extensions import TypedDict + from typing import Optional - class MovieTypedDict(TypedDict): - name: str - year: int - - class ExistingMovieTypedDict(TypedDict): - name: str - year: int + class A: + def foo(self, atticus, b: Optional[int] = None, c: bool = False): ... """, """ - from mypy_extensions import TypedDict - - class ExistingMovieTypedDict(TypedDict): - name: str - year: int - - def foo() -> None: - pass + class A: + def foo(self, atticus, b = None, c = False): ... 
""", """ - from mypy_extensions import TypedDict + from typing import Optional - class MovieTypedDict(TypedDict): - name: str - year: int - - class ExistingMovieTypedDict(TypedDict): - name: str - year: int - - def foo() -> None: - pass + class A: + def foo(self, atticus, b: Optional[int] = None, c: bool = False): ... """, ), - # Sanity check that we don't fail when the stub has relative imports. - # We don't do anything with those imports, though. - ( + } + ) + def test_annotate_classes(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) + + @data_provider( + { + "method_and_function_of_same_name": ( """ - from .. import hello - def foo() -> typing.Sequence[int]: ... + def foo() -> int: ... + + class A: + def foo() -> str: ... """, """ def foo(): - return [] + return 1 + class A: + def foo(): + return '' """, """ - def foo() -> typing.Sequence[int]: - return [] + def foo() -> int: + return 1 + class A: + def foo() -> str: + return '' """, ), - ( + "global_and_attribute_of_same_name": ( + """ + bar: int = ... + class A: + bar: str = ... + """, + """ + bar = foo() + class A: + bar = foobar() + """, + """ + bar: int = foo() + class A: + bar: str = foobar() + """, + ), + "add_global_annotation_simple_case": ( + """ + a: Dict[str, int] = ... + """, + """ + def foo() -> int: + return 1 + a = {} + a['x'] = foo() + """, + """ + def foo() -> int: + return 1 + a: Dict[str, int] = {} + a['x'] = foo() + """, + ), + "add_global_annotation_with_Type__no_added_import": ( """ from typing import Dict @@ -651,18 +786,289 @@ class TestApplyAnnotationsVisitor(CodemodTest): example: Dict[str, Type[foo.Example]] = { "test": foo() } """, ), - ) + "tuple_assign__add_new_top_level_declarations": ( + """ + a: int = ... + b: str = ... 
+ """, + """ + def foo() -> Tuple[int, str]: + return (1, "") + + a, b = foo() + """, + """ + a: int + b: str + + def foo() -> Tuple[int, str]: + return (1, "") + + a, b = foo() + """, + ), + "list_assign__add_new_top_level_declarations": ( + """ + a: int = ... + b: str = ... + """, + """ + def foo() -> Tuple[int, str]: + return (1, "") + + [a, b] = foo() + """, + """ + a: int + b: str + + def foo() -> Tuple[int, str]: + return (1, "") + + [a, b] = foo() + """, + ), + "tuples_with_subscripts__add_new_toplevel_declaration": ( + """ + a: int = ... + """, + """ + from typing import Tuple + + def foo() -> Tuple[str, int]: + return "", 1 + + b['z'], a = foo() + """, + """ + from typing import Tuple + a: int + + def foo() -> Tuple[str, int]: + return "", 1 + + b['z'], a = foo() + """, + ), + "handle_quoted_annotations": ( + """ + bar: "a.b.Example" + + def f(x: "typing.Union[int, str]") -> "typing.Union[int, str]": ... + + class A: + def f(self: "A") -> "A": ... + """, + """ + bar = Example() + + def f(x): + return x + + class A: + def f(self): + return self + """, + """ + bar: "a.b.Example" = Example() + + def f(x: "typing.Union[int, str]") -> "typing.Union[int, str]": + return x + + class A: + def f(self: "A") -> "A": + return self + """, + ), + } ) - def test_annotate_functions(self, stub: str, before: str, after: str) -> None: - context = CodemodContext() - ApplyTypeAnnotationsVisitor.store_stub_in_context( - context, parse_module(textwrap.dedent(stub.rstrip())) - ) - self.assertCodemod(before, after, context_override=context) + def test_annotate_mixed(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( - ( - ( + { + "insert_new_TypedDict_class_not_in_source_file": ( + """ + from mypy_extensions import TypedDict + + class MovieTypedDict(TypedDict): + name: str + year: int + """, + """ + def foo() -> None: + pass + """, + """ + from mypy_extensions import TypedDict + + class 
MovieTypedDict(TypedDict): + name: str + year: int + + def foo() -> None: + pass + """, + ), + "insert_only_TypedDict_class_not_already_in_source": ( + """ + from mypy_extensions import TypedDict + + class MovieTypedDict(TypedDict): + name: str + year: int + + class ExistingMovieTypedDict(TypedDict): + name: str + year: int + """, + """ + from mypy_extensions import TypedDict + + class ExistingMovieTypedDict(TypedDict): + name: str + year: int + + def foo() -> None: + pass + """, + """ + from mypy_extensions import TypedDict + + class MovieTypedDict(TypedDict): + name: str + year: int + + class ExistingMovieTypedDict(TypedDict): + name: str + year: int + + def foo() -> None: + pass + """, + ), + } + ) + def test_adding_typed_dicts(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) + + @data_provider( + { + "insert_new_TypeVar_not_in_source_file": ( + """ + from typing import Dict, TypeVar + + _KT = TypeVar('_KT') + _VT = TypeVar('_VT') + + class UserDict(Dict[_KT, _VT]): + def __init__(self, initialdata: Dict[_KT, _VT] = ...): ... + """, + """ + class UserDict: + def __init__(self, initialdata = None): + pass + """, + """ + from typing import Dict, TypeVar + + _KT = TypeVar('_KT') + _VT = TypeVar('_VT') + + class UserDict: + def __init__(self, initialdata: Dict[_KT, _VT] = None): + pass + """, + ), + "insert_only_used_TypeVar_not_already_in_source": ( + """ + from typing import Dict, TypeVar + + K = TypeVar('K') + V = TypeVar('V') + X = TypeVar('X') + + class UserDict(Dict[K, V]): + def __init__(self, initialdata: Dict[K, V] = ...): ... 
+ """, + """ + from typing import TypeVar + + V = TypeVar('V') + + class UserDict: + def __init__(self, initialdata = None): + pass + + def f(x: V) -> V: + pass + """, + """ + from typing import Dict, TypeVar + + K = TypeVar('K') + + V = TypeVar('V') + + class UserDict: + def __init__(self, initialdata: Dict[K, V] = None): + pass + + def f(x: V) -> V: + pass + """, + ), + "insert_Generic_base_class": ( + """ + from typing import TypeVar + + T = TypeVar('T') + X = TypeVar('X') + + class B(A, Generic[T]): + def f(self, x: T) -> T: ... + """, + """ + from typing import TypeVar + + V = TypeVar('V') + + def f(x: V) -> V: + pass + + class A: + pass + + class B(A): + def f(self, x): + pass + """, + """ + from typing import TypeVar + + T = TypeVar('T') + + V = TypeVar('V') + + def f(x: V) -> V: + pass + + class A: + pass + + class B(A, Generic[T]): + def f(self, x: T) -> T: + pass + """, + ), + } + ) + def test_adding_typevars(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) + + @data_provider( + { + "required_positional_only_args": ( """ def foo( a: int, /, b: str, c: int = ..., *, d: str = ..., e: int, f: int = ... @@ -681,7 +1087,7 @@ class TestApplyAnnotationsVisitor(CodemodTest): return 1 """, ), - ( + "positional_only_arg_with_default_value": ( """ def foo( a: int, b: int = ..., /, c: int = ..., *, d: str = ..., e: int, f: int = ... @@ -700,50 +1106,910 @@ class TestApplyAnnotationsVisitor(CodemodTest): return 1 """, ), - ) + } ) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `sys.version_info < (3, 8)` to decorator factory `unittest.skipIf`. 
@unittest.skipIf(sys.version_info < (3, 8), "Unsupported Python version") def test_annotate_functions_py38(self, stub: str, before: str, after: str) -> None: - context = CodemodContext() - ApplyTypeAnnotationsVisitor.store_stub_in_context( - context, parse_module(textwrap.dedent(stub.rstrip())) - ) - self.assertCodemod(before, after, context_override=context) + self.run_simple_test_case(stub=stub, before=before, after=after) @data_provider( - ( - ( + { + "fully_annotated_with_different_stub": ( """ - def fully_annotated_with_different_stub(a: bool, b: bool) -> str: ... + def f(a: bool, b: bool) -> str: ... """, """ - def fully_annotated_with_different_stub(a: int, b: str) -> bool: + def f(a: int, b: str) -> bool: return 'hello' """, """ - def fully_annotated_with_different_stub(a: bool, b: bool) -> str: + def f(a: bool, b: bool) -> str: return 'hello' """, ), - ) + } ) def test_annotate_functions_with_existing_annotations( self, stub: str, before: str, after: str ) -> None: - context = CodemodContext() - ApplyTypeAnnotationsVisitor.store_stub_in_context( - context, parse_module(textwrap.dedent(stub.rstrip())) - ) - # Test setting the overwrite flag on the codemod instance. - self.assertCodemod( - before, after, context_override=context, overwrite_existing_annotations=True - ) - - # Test setting the flag when storing the stub in the context. - context = CodemodContext() - ApplyTypeAnnotationsVisitor.store_stub_in_context( - context, - parse_module(textwrap.dedent(stub.rstrip())), + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, overwrite_existing_annotations=True, ) - self.assertCodemod(before, after, context_override=context) + + @data_provider( + { + "pep_604": ( + """ + def f(a: int | str, b: int | list[int | list[int | str]]) -> str: ... 
+ """, + """ + def f(a, b): + return 'hello' + """, + """ + def f(a: int | str, b: int | list[int | list[int | str]]) -> str: + return 'hello' + """, + ), + "pep_604_import": ( + """ + from typing import Callable + from collections.abc import Sequence + def f(a: int | str, b: int | list[int | Callable[[str], Sequence]]) -> str: ... + """, + """ + def f(a, b): + return 'hello' + """, + """ + from collections.abc import Sequence + from typing import Callable + + def f(a: int | str, b: int | list[int | Callable[[str], Sequence]]) -> str: + return 'hello' + """, + ), + } + ) + def test_annotate_functions_pep_604( + self, stub: str, before: str, after: str + ) -> None: + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, + overwrite_existing_annotations=True, + ) + + @data_provider( + { + "import_inside_list": ( + """ + from typing import Callable + from collections.abc import Sequence + def f(a: Callable[[Sequence[int]], int], b: int) -> str: ... + """, + """ + def f(a, b): + return 'hello' + """, + """ + from collections.abc import Sequence + from typing import Callable + + def f(a: Callable[[Sequence[int]], int], b: int) -> str: + return 'hello' + """, + ), + } + ) + def test_annotate_function_nested_imports( + self, stub: str, before: str, after: str + ) -> None: + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, + overwrite_existing_annotations=True, + ) + + @data_provider( + { + "return_self": ( + """ + class Foo: + def f(self) -> Foo: ... + """, + """ + class Foo: + def f(self): + return self + """, + """ + class Foo: + def f(self) -> "Foo": + return self + """, + ), + "return_forward_reference": ( + """ + class Foo: + def f(self) -> Bar: ... + + class Bar: + ... + """, + """ + class Foo: + def f(self): + return Bar() + + class Bar: + pass + """, + """ + class Foo: + def f(self) -> "Bar": + return Bar() + + class Bar: + pass + """, + ), + "return_backward_reference": ( + """ + class Bar: + ... 
+ + class Foo: + def f(self) -> Bar: ... + """, + """ + class Bar: + pass + + class Foo: + def f(self): + return Bar() + """, + """ + class Bar: + pass + + class Foo: + def f(self) -> Bar: + return Bar() + """, + ), + "return_undefined_name": ( + """ + class Foo: + def f(self) -> Bar: ... + """, + """ + class Foo: + def f(self): + return self + """, + """ + class Foo: + def f(self) -> Bar: + return self + """, + ), + "parameter_forward_reference": ( + """ + def f(input: Bar) -> None: ... + + class Bar: + ... + """, + """ + def f(input): + pass + + class Bar: + pass + """, + """ + def f(input: "Bar") -> None: + pass + + class Bar: + pass + """, + ), + } + ) + def test_annotate_with_forward_references( + self, stub: str, before: str, after: str + ) -> None: + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, + overwrite_existing_annotations=True, + ) + + @data_provider( + { + "fully_annotated_with_untyped_stub": ( + """ + def f(a, b): ... + """, + """ + def f(a: bool, b: bool) -> str: + return "hello" + """, + """ + def f(a: bool, b: bool) -> str: + return "hello" + """, + ), + "params_annotated_with_return_from_stub": ( + """ + def f(a, b) -> str: ... + """, + """ + def f(a: bool, b: bool): + return "hello" + """, + """ + def f(a: bool, b: bool) -> str: + return "hello" + """, + ), + "partially_annotated_params_with_partial_stub": ( + """ + def f(a, b: int): ... + """, + """ + def f(a: bool, b) -> str: + return "hello" + """, + """ + def f(a: bool, b: int) -> str: + return "hello" + """, + ), + } + ) + def test_annotate_using_incomplete_stubs( + self, stub: str, before: str, after: str + ) -> None: + """ + Ensure that when the stubs are missing annotations where the existing + code has them, we won't remove the existing annotations even when + `overwrite_existing_annotations` is set to `True`. 
+ """ + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, + overwrite_existing_annotations=True, + ) + + @data_provider( + { + "basic_example_using_future_annotations": ( + """ + def f() -> bool: ... + """, + """ + def f(): + return True + """, + """ + from __future__ import annotations + + def f() -> bool: + return True + """, + ), + "no_use_future_if_no_changes": ( + """ + def f() -> bool: ... + """, + """ + def f() -> bool: + return True + """, + """ + def f() -> bool: + return True + """, + ), + } + ) + def test_use_future_annotations(self, stub: str, before: str, after: str) -> None: + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, + use_future_annotations=True, + ) + + @data_provider( + { + "mismatched_signature_posargs": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(a): + return 'hello' + """, + """ + def f(a): + return 'hello' + """, + ), + "mismatched_signature_annotation": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(a, b: int): + return 'hello' + """, + """ + def f(a: bool, b: int) -> str: + return 'hello' + """, + ), + "mismatched_posarg_names": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(x, y): + return 'hello' + """, + """ + def f(x, y): + return 'hello' + """, + ), + "mismatched_return_type": ( + """ + def f(a: bool, b: bool) -> int: ... + """, + """ + def f(a, b) -> str: + return 'hello' + """, + """ + def f(a: bool, b: bool) -> str: + return 'hello' + """, + ), + "matched_signature": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(a: bool, b = False): + return 'hello' + """, + """ + def f(a: bool, b: bool = False) -> str: + return 'hello' + """, + ), + "matched_signature_with_permuted_kwargs": ( + """ + def f(*, a: bool, b: bool) -> str: ... 
+ """, + """ + def f(*, b: bool, a = False): + return 'hello' + """, + """ + def f(*, b: bool, a: bool = False) -> str: + return 'hello' + """, + ), + } + ) + def test_signature_matching(self, stub: str, before: str, after: str) -> None: + self.run_test_case_with_flags( + stub=stub, + before=before, + after=after, + ) + + @data_provider( + { + "mismatched_posarg_names": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(x, y): + return 'hello' + """, + """ + def f(x: bool, y: bool) -> str: + return 'hello' + """, + ), + "mismatched_kwarg_names": ( + """ + def f(p: int, q: str, *, a: bool, b: bool) -> str: ... + """, + """ + def f(p, q, *, x, y): + return 'hello' + """, + """ + def f(p, q, *, x, y): + return 'hello' + """, + ), + } + ) + def test_signature_matching_with_nonstrict_posargs( + self, stub: str, before: str, after: str + ) -> None: + self.run_test_case_with_flags( + stub=stub, before=before, after=after, strict_posargs_matching=False + ) + + @data_provider( + { + "mismatched_signature_posargs": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(a): + return 'hello' + """, + """ + def f(a): + return 'hello' + """, + ), + "mismatched_signature_annotation": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(a, b: int): + return 'hello' + """, + """ + def f(a, b: int): + return 'hello' + """, + ), + "mismatched_posarg_names": ( + """ + def f(a: bool, b: bool) -> str: ... + """, + """ + def f(x, y): + return 'hello' + """, + """ + def f(x, y): + return 'hello' + """, + ), + "mismatched_return_type": ( + """ + def f(a: bool, b: bool) -> int: ... + """, + """ + def f(a, b) -> str: + return 'hello' + """, + """ + def f(a, b) -> str: + return 'hello' + """, + ), + "matched_signature": ( + """ + def f(a: bool, b: bool) -> str: ... 
+ """, + """ + def f(a: bool, b = False): + return 'hello' + """, + """ + def f(a: bool, b: bool = False) -> str: + return 'hello' + """, + ), + "matched_signature_with_permuted_kwargs": ( + """ + def f(*, a: bool, b: bool) -> str: ... + """, + """ + def f(*, b: bool, a = False): + return 'hello' + """, + """ + def f(*, b: bool, a: bool = False) -> str: + return 'hello' + """, + ), + } + ) + def test_signature_matching_with_strict_annotation_matching( + self, stub: str, before: str, after: str + ) -> None: + self.run_test_case_with_flags( + stub=stub, before=before, after=after, strict_annotation_matching=True + ) + + @data_provider( + { + "test_counting_parameters_and_returns": ( + """ + def f(counted: int, not_counted) -> Counted: ... + + def g(not_counted: int, counted: str) -> Counted: ... + + def h(counted: int) -> NotCounted: ... + + def not_in_module(x: int, y: int) -> str: ... + """, + """ + def f(counted, not_counted): + return Counted() + + def g(not_counted: int, counted): + return Counted() + + def h(counted) -> NotCounted: + return Counted() + """, + """ + def f(counted: int, not_counted) -> Counted: + return Counted() + + def g(not_counted: int, counted: str) -> Counted: + return Counted() + + def h(counted: int) -> NotCounted: + return Counted() + """, + AnnotationCounts( + parameter_annotations=3, + return_annotations=2, + ), + True, + ), + "test_counting_globals_classes_and_attributes": ( + """ + global0: int = ... 
+ global1: int + + class InModule: + attr_will_be_found: int + attr_will_not_be_found: int + + class NotInModule: + attr: int + """, + """ + global0 = 1 + global1, global2 = (1, 1) + + class InModule: + attr_will_be_found = 0 + def __init__(self): + self.attr_will_not_be_found = 1 + """, + """ + global1: int + + class NotInModule: + attr: int + + global0: int = 1 + global1, global2 = (1, 1) + + class InModule: + attr_will_be_found: int = 0 + def __init__(self): + self.attr_will_not_be_found = 1 + + """, + AnnotationCounts( + global_annotations=2, + attribute_annotations=1, + classes_added=1, + ), + True, + ), + "test_counting_no_changes": ( + """ + class C: + attr_will_not_be_found: bar.X + """, + """ + class C: + def __init__(self): + self.attr_will_not_be_found = None + """, + """ + class C: + def __init__(self): + self.attr_will_not_be_found = None + """, + AnnotationCounts(), + False, + ), + } + ) + def test_count_annotations( + self, + stub: str, + before: str, + after: str, + annotation_counts: AnnotationCounts, + any_changes_applied: bool, + ) -> None: + stub = self.make_fixture_data(stub) + before = self.make_fixture_data(before) + after = self.make_fixture_data(after) + + context = CodemodContext() + ApplyTypeAnnotationsVisitor.store_stub_in_context( + context=context, stub=parse_module(stub) + ) + visitor = ApplyTypeAnnotationsVisitor(context=context) + + output_code = visitor.transform_module(parse_module(before)).code + + self.assertEqual(after, output_code) + self.assertEqual(str(annotation_counts), str(visitor.annotation_counts)) + self.assertEqual( + any_changes_applied, visitor.annotation_counts.any_changes_applied() + ) + + @data_provider( + { + "always_qualify": ( + """ + from a import A + import b + def f(x: A, y: b.B) -> None: ... 
+ """, + """ + def f(x, y): + pass + """, + """ + import a + import b + + def f(x: a.A, y: b.B) -> None: + pass + """, + ), + "never_qualify_typing": ( + """ + from a import A + from b import B + from typing import List + + def f(x: List[A], y: B[A]) -> None: ... + """, + """ + def f(x, y): + pass + """, + """ + import a + import b + from typing import List + + def f(x: List[a.A], y: b.B[a.A]) -> None: + pass + """, + ), + "preserve_explicit_from_import": ( + """ + from a import A + import b + def f(x: A, y: b.B) -> None: ... + """, + """ + from b import B + def f(x, y): + pass + """, + """ + from b import B + import a + + def f(x: a.A, y: B) -> None: + pass + """, + ), + } + ) + def test_signature_matching_with_always_qualify( + self, stub: str, before: str, after: str + ) -> None: + self.run_test_case_with_flags( + stub=stub, before=before, after=after, always_qualify_annotations=True + ) + + @data_provider( + { + "attribute": ( + """ + class C: + x: int + """, + """ + class C: + x = 0 + C.x = 1 + """, + """ + class C: + x: int = 0 + C.x = 1 + """, + ), + "subscript": ( + """ + d: dict[str, int] + """, + """ + d = {} + d["k"] = 0 + """, + """ + d: dict[str, int] = {} + d["k"] = 0 + """, + ), + "starred": ( + """ + a: int + b: list[int] + """, + """ + a, *b = [1, 2, 3] + """, + """ + a: int + b: list[int] + + a, *b = [1, 2, 3] + """, + ), + "name": ( + """ + a: int + """, + """ + a = 0 + """, + """ + a: int = 0 + """, + ), + "list": ( + """ + a: int + """, + """ + [a] = [0] + """, + """ + a: int + + [a] = [0] + """, + ), + "tuple": ( + """ + a: int + """, + """ + (a,) = [0] + """, + """ + a: int + + (a,) = [0] + """, + ), + } + ) + def test_valid_assign_expressions(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) + + @data_provider( + { + "toplevel": ( + """ + x: int + """, + """ + x = 1 + x = 2 + """, + """ + x: int = 1 + x = 2 + """, + ), + "class": ( + """ + class A: + x: int + """, + """ + 
class A: + x = 1 + x = 2 + """, + """ + class A: + x: int = 1 + x = 2 + """, + ), + "mixed": ( + """ + x: int + class A: + x: int + """, + """ + x = 1 + class A: + x = 1 + x = 2 + """, + """ + x: int = 1 + class A: + x: int = 1 + x = 2 + """, + ), + } + ) + def test_no_duplicate_annotations(self, stub: str, before: str, after: str) -> None: + self.run_simple_test_case(stub=stub, before=before, after=after) + + @data_provider( + { + "qualifier_jank": ( + """ + from module.submodule import B + M: B + class Foo: ... + """, + """ + from module import B + M = B() + class Foo: pass + """, + """ + from module import B + import module.submodule + + M: module.submodule.B = B() + class Foo: pass + """, + ), + } + ) + def test_idempotent(self, stub: str, before: str, after: str) -> None: + self.run_test_case_twice(stub=stub, before=before, after=after) diff --git a/libcst/codemod/visitors/tests/test_gather_comments.py b/libcst/codemod/visitors/tests/test_gather_comments.py index da93823e..72511842 100644 --- a/libcst/codemod/visitors/tests/test_gather_comments.py +++ b/libcst/codemod/visitors/tests/test_gather_comments.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/visitors/tests/test_gather_exports.py b/libcst/codemod/visitors/tests/test_gather_exports.py index 916eb5a2..3ac9e9ce 100644 --- a/libcst/codemod/visitors/tests/test_gather_exports.py +++ b/libcst/codemod/visitors/tests/test_gather_exports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -47,6 +47,18 @@ class TestGatherExportsVisitor(UnitTest): gatherer = self.gather_exports(code) self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) + def test_gather_exports_simple2(self) -> None: + code = """ + from foo import bar + from biz import baz + + __all__ = ["bar"] + __all__ += ["baz"] + """ + + gatherer = self.gather_exports(code) + self.assertEqual(gatherer.explicit_exported_objects, {"bar", "baz"}) + def test_gather_exports_simple_set(self) -> None: code = """ from foo import bar diff --git a/libcst/codemod/visitors/tests/test_gather_global_names.py b/libcst/codemod/visitors/tests/test_gather_global_names.py new file mode 100644 index 00000000..8a7a7b8b --- /dev/null +++ b/libcst/codemod/visitors/tests/test_gather_global_names.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +from libcst import parse_module +from libcst.codemod import CodemodContext, CodemodTest +from libcst.codemod.visitors import GatherGlobalNamesVisitor +from libcst.testing.utils import UnitTest + + +class TestGatherGlobalNamesVisitor(UnitTest): + def gather_global_names(self, code: str) -> GatherGlobalNamesVisitor: + transform_instance = GatherGlobalNamesVisitor( + CodemodContext(full_module_name="a.b.foobar") + ) + input_tree = parse_module(CodemodTest.make_fixture_data(code)) + input_tree.visit(transform_instance) + return transform_instance + + def test_gather_nothing(self) -> None: + code = """ + from a import b + b() + """ + gatherer = self.gather_global_names(code) + self.assertEqual(gatherer.global_names, set()) + self.assertEqual(gatherer.class_names, set()) + self.assertEqual(gatherer.function_names, set()) + + def test_globals(self) -> None: + code = """ + x = 1 + y = 2 + def foo(): pass + class Foo: pass + """ + gatherer = self.gather_global_names(code) + self.assertEqual(gatherer.global_names, 
{"x", "y"}) + self.assertEqual(gatherer.class_names, {"Foo"}) + self.assertEqual(gatherer.function_names, {"foo"}) + + def test_omit_nested(self) -> None: + code = """ + def foo(): + x = 1 + + class Foo: + def method(self): pass + """ + gatherer = self.gather_global_names(code) + self.assertEqual(gatherer.global_names, set()) + self.assertEqual(gatherer.class_names, {"Foo"}) + self.assertEqual(gatherer.function_names, {"foo"}) diff --git a/libcst/codemod/visitors/tests/test_gather_imports.py b/libcst/codemod/visitors/tests/test_gather_imports.py index b1e2c102..4fbdbad2 100644 --- a/libcst/codemod/visitors/tests/test_gather_imports.py +++ b/libcst/codemod/visitors/tests/test_gather_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -12,7 +12,7 @@ from libcst.testing.utils import UnitTest class TestGatherImportsVisitor(UnitTest): def gather_imports(self, code: str) -> GatherImportsVisitor: transform_instance = GatherImportsVisitor( - CodemodContext(full_module_name="a.b.foobar") + CodemodContext(full_module_name="a.b.foobar", full_package_name="a.b") ) input_tree = parse_module(CodemodTest.make_fixture_data(code)) input_tree.visit(transform_instance) diff --git a/libcst/codemod/visitors/tests/test_gather_string_annotation_names.py b/libcst/codemod/visitors/tests/test_gather_string_annotation_names.py index 25f2d070..d3c622a3 100644 --- a/libcst/codemod/visitors/tests/test_gather_string_annotation_names.py +++ b/libcst/codemod/visitors/tests/test_gather_string_annotation_names.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -80,3 +80,14 @@ class TestGatherNamesFromStringAnnotationsVisitor(UnitTest): visitor.names, {"api", "api.http_exceptions", "api.http_exceptions.HttpException"}, ) + + def test_literals(self) -> None: + visitor = self.gather_names( + """ + from typing import Literal + a: Literal["in"] + b: list[Literal["1x"]] + c: Literal["Any"] + """ + ) + self.assertEqual(visitor.names, set()) diff --git a/libcst/codemod/visitors/tests/test_gather_unused_imports.py b/libcst/codemod/visitors/tests/test_gather_unused_imports.py index 5fb3cba2..e6e0d9bb 100644 --- a/libcst/codemod/visitors/tests/test_gather_unused_imports.py +++ b/libcst/codemod/visitors/tests/test_gather_unused_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/codemod/visitors/tests/test_remove_imports.py b/libcst/codemod/visitors/tests/test_remove_imports.py index 564cf21e..6e51c515 100644 --- a/libcst/codemod/visitors/tests/test_remove_imports.py +++ b/libcst/codemod/visitors/tests/test_remove_imports.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -17,7 +17,6 @@ from libcst.testing.utils import data_provider class TestRemoveImportsCodemod(CodemodTest): - TRANSFORM = RemoveImportsVisitor def test_noop(self) -> None: @@ -419,7 +418,9 @@ class TestRemoveImportsCodemod(CodemodTest): before, after, [("a.b.c", "qux", None)], - context_override=CodemodContext(full_module_name="a.b.foobar"), + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), ) def test_dont_remove_inuse_importfrom_relative(self) -> None: @@ -446,7 +447,9 @@ class TestRemoveImportsCodemod(CodemodTest): before, after, [("a.b.c", "qux", None)], - context_override=CodemodContext(full_module_name="a.b.foobar"), + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), ) def test_dont_remove_wrong_importfrom_relative(self) -> None: @@ -473,7 +476,9 @@ class TestRemoveImportsCodemod(CodemodTest): before, after, [("a.b.d", "qux", None)], - context_override=CodemodContext(full_module_name="a.b.foobar"), + context_override=CodemodContext( + full_module_name="a.b.foobar", full_package_name="a.b" + ), ) def test_remove_import_complex(self) -> None: @@ -751,7 +756,6 @@ class TestRemoveImportsCodemod(CodemodTest): """ class RemoveBarTransformer(VisitorBasedCodemodCommand): - METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) @m.leave( @@ -815,7 +819,6 @@ class TestRemoveImportsCodemod(CodemodTest): """ class RemoveImportTransformer(VisitorBasedCodemodCommand): - METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) def visit_ImportFrom(self, node: cst.ImportFrom) -> None: @@ -854,7 +857,6 @@ class TestRemoveImportsCodemod(CodemodTest): """ class RemoveImportTransformer(VisitorBasedCodemodCommand): - METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) def visit_Import(self, node: cst.Import) -> None: @@ -886,7 +888,6 @@ class TestRemoveImportsCodemod(CodemodTest): """ class RemoveImportTransformer(VisitorBasedCodemodCommand): - 
METADATA_DEPENDENCIES = (QualifiedNameProvider, ScopeProvider) def visit_ImportFrom(self, node: cst.ImportFrom) -> None: diff --git a/libcst/display/__init__.py b/libcst/display/__init__.py new file mode 100644 index 00000000..49365a58 --- /dev/null +++ b/libcst/display/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from libcst.display.graphviz import dump_graphviz +from libcst.display.text import dump + +__all__ = [ + "dump", + "dump_graphviz", +] diff --git a/libcst/display/graphviz.py b/libcst/display/graphviz.py new file mode 100644 index 00000000..e6b5b748 --- /dev/null +++ b/libcst/display/graphviz.py @@ -0,0 +1,187 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from __future__ import annotations + +import textwrap +from collections.abc import Sequence + +from libcst import CSTNode +from libcst.helpers import filter_node_fields + + +_syntax_style = ', color="#777777", fillcolor="#eeeeee"' +_value_style = ', color="#3e99ed", fillcolor="#b8d9f8"' + +node_style: dict[str, str] = { + "__default__": "", + "EmptyLine": _syntax_style, + "IndentedBlock": _syntax_style, + "SimpleStatementLine": _syntax_style, + "SimpleWhitespace": _syntax_style, + "TrailingWhitespace": _syntax_style, + "Newline": _syntax_style, + "Comma": _syntax_style, + "LeftParen": _syntax_style, + "RightParen": _syntax_style, + "LeftSquareBracket": _syntax_style, + "RightSquareBracket": _syntax_style, + "LeftCurlyBrace": _syntax_style, + "RightCurlyBrace": _syntax_style, + "BaseSmallStatement": _syntax_style, + "BaseCompoundStatement": _syntax_style, + "SimpleStatementSuite": _syntax_style, + "Colon": _syntax_style, + "Dot": _syntax_style, + "Semicolon": _syntax_style, + 
"ParenthesizedWhitespace": _syntax_style, + "BaseParenthesizableWhitespace": _syntax_style, + "Comment": _syntax_style, + "Name": _value_style, + "Integer": _value_style, + "Float": _value_style, + "Imaginary": _value_style, + "SimpleString": _value_style, + "FormattedStringText": _value_style, +} +"""Graphviz style for specific CST nodes""" + + +def _create_node_graphviz(node: CSTNode) -> str: + """Creates the graphviz representation of a CST node.""" + node_name = node.__class__.__qualname__ + + if node_name in node_style: + style = node_style[node_name] + else: + style = node_style["__default__"] + + # pyre-ignore[16]: the existence of node.value is checked before usage + if hasattr(node, "value") and isinstance(node.value, str): + line_break = r"\n" + quote = '"' + escaped_quote = r"\"" + value = f"{line_break}<{node.value.replace(quote, escaped_quote)}>" + style = style + ', shape="box"' + else: + value = "" + + return f'{id(node)} [label="{node_name}{value}"{style}]' + + +def _node_repr_recursive( + node: object, + *, + show_defaults: bool, + show_syntax: bool, + show_whitespace: bool, +) -> list[str]: + """Creates the graphviz representation of a CST node, + and of its child nodes.""" + if not isinstance(node, CSTNode): + return [] + + fields = filter_node_fields( + node, + show_defaults=show_defaults, + show_syntax=show_syntax, + show_whitespace=show_whitespace, + ) + + graphviz_lines: list[str] = [_create_node_graphviz(node)] + + for field in fields: + value = getattr(node, field.name) + if isinstance(value, CSTNode): + # Display a single node + graphviz_lines.append(f'{id(node)} -> {id(value)} [label="{field.name}"]') + graphviz_lines.extend( + _node_repr_recursive( + value, + show_defaults=show_defaults, + show_syntax=show_syntax, + show_whitespace=show_whitespace, + ) + ) + continue + + if isinstance(value, Sequence): + # Display a sequence of nodes + for index, child in enumerate(value): + if isinstance(child, CSTNode): + graphviz_lines.append( + 
rf'{id(node)} -> {id(child)} [label="{field.name}[{index}]"]' + ) + graphviz_lines.extend( + _node_repr_recursive( + child, + show_defaults=show_defaults, + show_syntax=show_syntax, + show_whitespace=show_whitespace, + ) + ) + + return graphviz_lines + + +def dump_graphviz( + node: object, + *, + show_defaults: bool = False, + show_syntax: bool = False, + show_whitespace: bool = False, +) -> str: + """ + Returns a string representation (in graphviz .dot style) of a CST node, + and its child nodes. + + Setting ``show_defaults`` to ``True`` will add fields regardless if their + value is different from the default value. + + Setting ``show_whitespace`` will add whitespace fields and setting + ``show_syntax`` will add syntax fields while respecting the value of + ``show_defaults``. + """ + + graphviz_settings = textwrap.dedent( + r""" + layout=dot; + rankdir=TB; + splines=line; + ranksep=0.5; + nodesep=1.0; + dpi=300; + bgcolor=transparent; + node [ + style=filled, + color="#fb8d3f", + fontcolor="#4b4f54", + fillcolor="#fdd2b3", + fontname="Source Code Pro Semibold", + penwidth="2", + group=main, + ]; + edge [ + color="#999999", + fontcolor="#4b4f54", + fontname="Source Code Pro Semibold", + fontsize=12, + penwidth=2, + ]; + """[ + 1: + ] + ) + + return "\n".join( + ["digraph {", graphviz_settings] + + _node_repr_recursive( + node, + show_defaults=show_defaults, + show_syntax=show_syntax, + show_whitespace=show_whitespace, + ) + + ["}"] + ) diff --git a/libcst/_version.py b/libcst/display/tests/__init__.py similarity index 60% rename from libcst/_version.py rename to libcst/display/tests/__init__.py index fa483e91..7bec24cb 100644 --- a/libcst/_version.py +++ b/libcst/display/tests/__init__.py @@ -1,7 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
- - -LIBCST_VERSION: str = "0.3.14" diff --git a/libcst/display/tests/test_dump_graphviz.py b/libcst/display/tests/test_dump_graphviz.py new file mode 100644 index 00000000..17ce231f --- /dev/null +++ b/libcst/display/tests/test_dump_graphviz.py @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from __future__ import annotations + +from textwrap import dedent +from typing import TYPE_CHECKING + +from libcst import parse_module +from libcst.display import dump_graphviz +from libcst.testing.utils import UnitTest + +if TYPE_CHECKING: + from libcst import Module + + +class CSTDumpGraphvizTest(UnitTest): + """Check dump_graphviz contains CST nodes.""" + + source_code: str = dedent( + r""" + def foo(a: str) -> None: + pass ; + pass + return + """[ + 1: + ] + ) + cst: Module + + @classmethod + def setUpClass(cls) -> None: + cls.cst = parse_module(cls.source_code) + + def _assert_node(self, node_name: str, graphviz_str: str) -> None: + self.assertIn( + node_name, graphviz_str, f"No node {node_name} found in graphviz_dump" + ) + + def _check_essential_nodes_in_tree(self, graphviz_str: str) -> None: + # Check CST nodes are present in graphviz string + self._assert_node("Module", graphviz_str) + self._assert_node("FunctionDef", graphviz_str) + self._assert_node("Name", graphviz_str) + self._assert_node("Parameters", graphviz_str) + self._assert_node("Param", graphviz_str) + self._assert_node("Annotation", graphviz_str) + self._assert_node("IndentedBlock", graphviz_str) + self._assert_node("SimpleStatementLine", graphviz_str) + self._assert_node("Pass", graphviz_str) + self._assert_node("Return", graphviz_str) + + # Check CST values are present in graphviz string + self._assert_node("", graphviz_str) + self._assert_node("", graphviz_str) + self._assert_node("", graphviz_str) + self._assert_node("", graphviz_str) + + def 
test_essential_tree(self) -> None: + """Check essential nodes are present in the CST graphviz dump.""" + graphviz_str = dump_graphviz(self.cst) + self._check_essential_nodes_in_tree(graphviz_str) + + def test_full_tree(self) -> None: + """Check all nodes are present in the CST graphviz dump.""" + graphviz_str = dump_graphviz( + self.cst, + show_whitespace=True, + show_defaults=True, + show_syntax=True, + ) + self._check_essential_nodes_in_tree(graphviz_str) + + self._assert_node("Semicolon", graphviz_str) + self._assert_node("SimpleWhitespace", graphviz_str) + self._assert_node("Newline", graphviz_str) + self._assert_node("TrailingWhitespace", graphviz_str) + + self._assert_node("<>", graphviz_str) + self._assert_node("< >", graphviz_str) diff --git a/libcst/tests/test_tool.py b/libcst/display/tests/test_dump_text.py similarity index 98% rename from libcst/tests/test_tool.py rename to libcst/display/tests/test_dump_text.py index b5b4aeeb..bbfc6eb0 100644 --- a/libcst/tests/test_tool.py +++ b/libcst/display/tests/test_dump_text.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -10,7 +10,7 @@ from libcst.testing.utils import UnitTest from libcst.tool import dump -class PrettyPrintNodesTest(UnitTest): +class CSTDumpTextTest(UnitTest): def test_full_tree(self) -> None: module = r""" Module( @@ -152,6 +152,10 @@ class PrettyPrintNodesTest(UnitTest): whitespace_before_colon=SimpleWhitespace( value='', ), + type_parameters=None, + whitespace_after_type_parameters=SimpleWhitespace( + value='', + ), ), ], header=[], @@ -243,6 +247,7 @@ class PrettyPrintNodesTest(UnitTest): ), ), asynchronous=None, + type_parameters=None, ), ], encoding='utf-8', @@ -532,6 +537,10 @@ class PrettyPrintNodesTest(UnitTest): whitespace_before_colon=SimpleWhitespace( value='', ), + type_parameters=None, + whitespace_after_type_parameters=SimpleWhitespace( + value='', + ), ), ], header=[], @@ -612,6 +621,7 @@ class PrettyPrintNodesTest(UnitTest): ), ), asynchronous=None, + type_parameters=None, ), ], ) diff --git a/libcst/display/text.py b/libcst/display/text.py new file mode 100644 index 00000000..0e270009 --- /dev/null +++ b/libcst/display/text.py @@ -0,0 +1,133 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from __future__ import annotations + +import dataclasses +from typing import List, Sequence + +from libcst import CSTLogicError, CSTNode +from libcst.helpers import filter_node_fields + +_DEFAULT_INDENT: str = " " + + +def _node_repr_recursive( # noqa: C901 + node: object, + *, + indent: str = _DEFAULT_INDENT, + show_defaults: bool = False, + show_syntax: bool = False, + show_whitespace: bool = False, +) -> List[str]: + if isinstance(node, CSTNode): + # This is a CSTNode, we must pretty-print it. 
+ fields: Sequence[dataclasses.Field[CSTNode]] = filter_node_fields( + node=node, + show_defaults=show_defaults, + show_syntax=show_syntax, + show_whitespace=show_whitespace, + ) + + tokens: List[str] = [node.__class__.__name__] + + if len(fields) == 0: + tokens.append("()") + else: + tokens.append("(\n") + + for field in fields: + child_tokens: List[str] = [field.name, "="] + value = getattr(node, field.name) + + if isinstance(value, (str, bytes)) or not isinstance(value, Sequence): + # Render out the node contents + child_tokens.extend( + _node_repr_recursive( + value, + indent=indent, + show_whitespace=show_whitespace, + show_defaults=show_defaults, + show_syntax=show_syntax, + ) + ) + elif isinstance(value, Sequence): + # Render out a list of individual nodes + if len(value) > 0: + child_tokens.append("[\n") + list_tokens: List[str] = [] + + last_value = len(value) - 1 + for j, v in enumerate(value): + list_tokens.extend( + _node_repr_recursive( + v, + indent=indent, + show_whitespace=show_whitespace, + show_defaults=show_defaults, + show_syntax=show_syntax, + ) + ) + if j != last_value: + list_tokens.append(",\n") + else: + list_tokens.append(",") + + split_by_line = "".join(list_tokens).split("\n") + child_tokens.append( + "\n".join(f"{indent}{t}" for t in split_by_line) + ) + + child_tokens.append("\n]") + else: + child_tokens.append("[]") + else: + raise CSTLogicError("Logic error!") + + # Handle indentation and trailing comma. 
+ split_by_line = "".join(child_tokens).split("\n") + tokens.append("\n".join(f"{indent}{t}" for t in split_by_line)) + tokens.append(",\n") + + tokens.append(")") + + return tokens + else: + # This is a python value, just return the repr + return [repr(node)] + + +def dump( + node: CSTNode, + *, + indent: str = _DEFAULT_INDENT, + show_defaults: bool = False, + show_syntax: bool = False, + show_whitespace: bool = False, +) -> str: + """ + Returns a string representation of the node that contains minimal differences + from the default contruction of the node while also hiding whitespace and + syntax fields. + + Setting ``show_defaults`` to ``True`` will add fields regardless if their + value is different from the default value. + + Setting ``show_whitespace`` will add whitespace fields and setting + ``show_syntax`` will add syntax fields while respecting the value of + ``show_defaults``. + + When all keyword args are set to true, the output of this function is + indentical to the __repr__ method of the node. + """ + return "".join( + _node_repr_recursive( + node, + indent=indent, + show_defaults=show_defaults, + show_syntax=show_syntax, + show_whitespace=show_whitespace, + ) + ) diff --git a/libcst/helpers/__init__.py b/libcst/helpers/__init__.py index 77c4389c..817acc39 100644 --- a/libcst/helpers/__init__.py +++ b/libcst/helpers/__init__.py @@ -1,13 +1,9 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# -from libcst.helpers._statement import ( - get_absolute_module_for_import, - get_absolute_module_for_import_or_raise, -) from libcst.helpers._template import ( parse_template_expression, parse_template_module, @@ -18,12 +14,34 @@ from libcst.helpers.expression import ( get_full_name_for_node, get_full_name_for_node_or_raise, ) -from libcst.helpers.module import insert_header_comments - +from libcst.helpers.module import ( + calculate_module_and_package, + get_absolute_module, + get_absolute_module_for_import, + get_absolute_module_for_import_or_raise, + get_absolute_module_from_package, + get_absolute_module_from_package_for_import, + get_absolute_module_from_package_for_import_or_raise, + insert_header_comments, + ModuleNameAndPackage, +) +from libcst.helpers.node_fields import ( + filter_node_fields, + get_field_default_value, + get_node_fields, + is_default_node_field, + is_syntax_node_field, + is_whitespace_node_field, +) __all__ = [ + "calculate_module_and_package", + "get_absolute_module", "get_absolute_module_for_import", "get_absolute_module_for_import_or_raise", + "get_absolute_module_from_package", + "get_absolute_module_from_package_for_import", + "get_absolute_module_from_package_for_import_or_raise", "get_full_name_for_node", "get_full_name_for_node_or_raise", "ensure_type", @@ -31,4 +49,11 @@ __all__ = [ "parse_template_module", "parse_template_statement", "parse_template_expression", + "ModuleNameAndPackage", + "get_node_fields", + "get_field_default_value", + "is_whitespace_node_field", + "is_syntax_node_field", + "is_default_node_field", + "filter_node_fields", ] diff --git a/libcst/helpers/_statement.py b/libcst/helpers/_statement.py deleted file mode 100644 index a9431b44..00000000 --- a/libcst/helpers/_statement.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-# -from typing import Optional - -import libcst as cst -from libcst.helpers.expression import get_full_name_for_node - - -def get_absolute_module_for_import( - current_module: Optional[str], import_node: cst.ImportFrom -) -> Optional[str]: - # First, let's try to grab the module name, regardless of relative status. - module = import_node.module - module_name = get_full_name_for_node(module) if module is not None else None - # Now, get the relative import location if it exists. - num_dots = len(import_node.relative) - if num_dots == 0: - # This is an absolute import, so the module is correct. - return module_name - if current_module is None: - # We don't actually have the current module available, so we can't compute - # the absolute module from relative. - return None - # We have the current module, as well as the relative, let's compute the base. - modules = current_module.split(".") - if len(modules) < num_dots: - # This relative import goes past the base of the repository, so we can't calculate it. - return None - base_module = ".".join(modules[:-num_dots]) - # Finally, if the module name was supplied, append it to the end. - if module_name is not None: - # If we went all the way to the top, the base module should be empty, so we - # should return the relative bit as absolute. Otherwise, combine the base - # module and module name using a dot separator. - base_module = ( - f"{base_module}.{module_name}" if len(base_module) > 0 else module_name - ) - # If they tried to import all the way to the root, return None. Otherwise, - # return the module itself. 
- return base_module if len(base_module) > 0 else None - - -def get_absolute_module_for_import_or_raise( - current_module: Optional[str], import_node: cst.ImportFrom -) -> str: - module = get_absolute_module_for_import(current_module, import_node) - if module is None: - raise Exception(f"Unable to compute absolute module for {import_node}") - return module diff --git a/libcst/helpers/_template.py b/libcst/helpers/_template.py index b1be6e5c..e205e0af 100644 --- a/libcst/helpers/_template.py +++ b/libcst/helpers/_template.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -9,7 +9,6 @@ from typing import Dict, Mapping, Optional, Set, Union import libcst as cst from libcst.helpers.common import ensure_type - TEMPLATE_PREFIX: str = "__LIBCST_MANGLED_NAME_" TEMPLATE_SUFFIX: str = "_EMAN_DELGNAM_TSCBIL__" @@ -46,12 +45,12 @@ def unmangled_name(var: str) -> Optional[str]: def mangle_template(template: str, template_vars: Set[str]) -> str: if TEMPLATE_PREFIX in template or TEMPLATE_SUFFIX in template: - raise Exception("Cannot parse a template containing reserved strings") + raise ValueError("Cannot parse a template containing reserved strings") for var in template_vars: original = f"{{{var}}}" if original not in template: - raise Exception( + raise ValueError( f'Template string is missing a reference to "{var}" referred to in kwargs' ) template = template.replace(original, mangled_name(var)) @@ -143,7 +142,7 @@ class TemplateTransformer(cst.CSTTransformer): name for name in template_replacements if name not in supported_vars } if unsupported_vars: - raise Exception( + raise ValueError( f'Template replacement for "{next(iter(unsupported_vars))}" is unsupported' ) @@ -351,7 +350,7 @@ class TemplateChecker(cst.CSTVisitor): def visit_Name(self, node: cst.Name) -> None: for var 
in self.template_vars: if node.value == mangled_name(var): - raise Exception(f'Template variable "{var}" was not replaced properly') + raise ValueError(f'Template variable "{var}" was not replaced properly') def unmangle_nodes( @@ -425,8 +424,8 @@ def parse_template_statement( if not isinstance( new_statement, (cst.SimpleStatementLine, cst.BaseCompoundStatement) ): - raise Exception( - f"Expected a statement but got a {new_statement.__class__.__name__}!" + raise TypeError( + f"Expected a statement but got a {new_statement.__class__.__qualname__}!" ) new_statement.visit(TemplateChecker({name for name in template_replacements})) return new_statement diff --git a/libcst/helpers/common.py b/libcst/helpers/common.py index 5bf7e460..dee73aa4 100644 --- a/libcst/helpers/common.py +++ b/libcst/helpers/common.py @@ -1,14 +1,14 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # -from typing import Type +from typing import Type, TypeVar -from libcst._types import CSTNodeT +T = TypeVar("T") -def ensure_type(node: object, nodetype: Type[CSTNodeT]) -> CSTNodeT: +def ensure_type(node: object, nodetype: Type[T]) -> T: """ Takes any python object, and a LibCST :class:`~libcst.CSTNode` subclass and refines the type of the python object. This is most useful when you already @@ -19,7 +19,7 @@ def ensure_type(node: object, nodetype: Type[CSTNodeT]) -> CSTNodeT: """ if not isinstance(node, nodetype): - raise Exception( - f"Expected a {nodetype.__name__} but got a {node.__class__.__name__}!" + raise ValueError( + f"Expected a {nodetype.__name__} but got a {node.__class__.__qualname__}!" 
) return node diff --git a/libcst/helpers/expression.py b/libcst/helpers/expression.py index 2a93c509..5ae016cf 100644 --- a/libcst/helpers/expression.py +++ b/libcst/helpers/expression.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -38,5 +38,5 @@ def get_full_name_for_node_or_raise(node: Union[str, cst.CSTNode]) -> str: """ full_name = get_full_name_for_node(node) if full_name is None: - raise Exception(f"Not able to parse full name for: {node}") + raise ValueError(f"Not able to parse full name for: {node}") return full_name diff --git a/libcst/helpers/matchers.py b/libcst/helpers/matchers.py new file mode 100644 index 00000000..e641c43c --- /dev/null +++ b/libcst/helpers/matchers.py @@ -0,0 +1,45 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# + +from dataclasses import fields, is_dataclass, MISSING + +from libcst import matchers +from libcst._nodes.base import CSTNode + + +def node_to_matcher( + node: CSTNode, *, match_syntactic_trivia: bool = False +) -> matchers.BaseMatcherNode: + """Convert a concrete node to a matcher.""" + if not is_dataclass(node): + raise ValueError(f"{node} is not a CSTNode") + + attrs = {} + for field in fields(node): + name = field.name + child = getattr(node, name) + if not match_syntactic_trivia and field.name.startswith("whitespace"): + # Not all nodes have whitespace fields, some have multiple, but they all + # start with whitespace* + child = matchers.DoNotCare() + elif field.default is not MISSING and child == field.default: + child = matchers.DoNotCare() + # pyre-ignore[29]: Union[MISSING_TYPE, ...] is not a function. 
+ elif field.default_factory is not MISSING and child == field.default_factory(): + child = matchers.DoNotCare() + elif isinstance(child, (list, tuple)): + child = type(child)( + node_to_matcher(item, match_syntactic_trivia=match_syntactic_trivia) + for item in child + ) + elif hasattr(matchers, type(child).__name__): + child = node_to_matcher( + child, match_syntactic_trivia=match_syntactic_trivia + ) + attrs[name] = child + + matcher = getattr(matchers, type(node).__name__) + return matcher(**attrs) diff --git a/libcst/helpers/module.py b/libcst/helpers/module.py index 5f2bddbe..2b2973bf 100644 --- a/libcst/helpers/module.py +++ b/libcst/helpers/module.py @@ -1,15 +1,19 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # +from dataclasses import dataclass from itertools import islice -from typing import List +from pathlib import Path, PurePath +from typing import List, Optional -import libcst +from libcst import Comment, EmptyLine, ImportFrom, Module +from libcst._types import StrPath +from libcst.helpers.expression import get_full_name_for_node -def insert_header_comments(node: libcst.Module, comments: List[str]) -> libcst.Module: +def insert_header_comments(node: Module, comments: List[str]) -> Module: """ Insert comments after last non-empty line in header. Use this to insert one or more comments after any copyright preamble in a :class:`~libcst.Module`. 
Each comment in @@ -25,7 +29,136 @@ def insert_header_comments(node: libcst.Module, comments: List[str]) -> libcst.M comment_lines = islice(node.header, last_comment_index + 1) empty_lines = islice(node.header, last_comment_index + 1, None) - inserted_lines = [ - libcst.EmptyLine(comment=libcst.Comment(value=comment)) for comment in comments - ] + inserted_lines = [EmptyLine(comment=Comment(value=comment)) for comment in comments] + # pyre-fixme[60]: Concatenation not yet support for multiple variadic tuples: + # `*comment_lines, *inserted_lines, *empty_lines`. return node.with_changes(header=(*comment_lines, *inserted_lines, *empty_lines)) + + +def get_absolute_module( + current_module: Optional[str], module_name: Optional[str], num_dots: int +) -> Optional[str]: + if num_dots == 0: + # This is an absolute import, so the module is correct. + return module_name + if current_module is None: + # We don't actually have the current module available, so we can't compute + # the absolute module from relative. + return None + # We have the current module, as well as the relative, let's compute the base. + modules = current_module.split(".") + if len(modules) < num_dots: + # This relative import goes past the base of the repository, so we can't calculate it. + return None + base_module = ".".join(modules[:-num_dots]) + # Finally, if the module name was supplied, append it to the end. + if module_name is not None: + # If we went all the way to the top, the base module should be empty, so we + # should return the relative bit as absolute. Otherwise, combine the base + # module and module name using a dot separator. + base_module = ( + f"{base_module}.{module_name}" if len(base_module) > 0 else module_name + ) + # If they tried to import all the way to the root, return None. Otherwise, + # return the module itself. 
+ return base_module if len(base_module) > 0 else None + + +def get_absolute_module_for_import( + current_module: Optional[str], import_node: ImportFrom +) -> Optional[str]: + # First, let's try to grab the module name, regardless of relative status. + module = import_node.module + module_name = get_full_name_for_node(module) if module is not None else None + # Now, get the relative import location if it exists. + num_dots = len(import_node.relative) + return get_absolute_module(current_module, module_name, num_dots) + + +def get_absolute_module_for_import_or_raise( + current_module: Optional[str], import_node: ImportFrom +) -> str: + module = get_absolute_module_for_import(current_module, import_node) + if module is None: + raise ValueError(f"Unable to compute absolute module for {import_node}") + return module + + +def get_absolute_module_from_package( + current_package: Optional[str], module_name: Optional[str], num_dots: int +) -> Optional[str]: + if num_dots == 0: + # This is an absolute import, so the module is correct. + return module_name + if current_package is None or current_package == "": + # We don't actually have the current module available, so we can't compute + # the absolute module from relative. + return None + + # see importlib._bootstrap._resolve_name + # https://github.com/python/cpython/blob/3.10/Lib/importlib/_bootstrap.py#L902 + bits = current_package.rsplit(".", num_dots - 1) + if len(bits) < num_dots: + return None + + base = bits[0] + return "{}.{}".format(base, module_name) if module_name else base + + +def get_absolute_module_from_package_for_import( + current_package: Optional[str], import_node: ImportFrom +) -> Optional[str]: + # First, let's try to grab the module name, regardless of relative status. + module = import_node.module + module_name = get_full_name_for_node(module) if module is not None else None + # Now, get the relative import location if it exists. 
+ num_dots = len(import_node.relative) + return get_absolute_module_from_package(current_package, module_name, num_dots) + + +def get_absolute_module_from_package_for_import_or_raise( + current_package: Optional[str], import_node: ImportFrom +) -> str: + module = get_absolute_module_from_package_for_import(current_package, import_node) + if module is None: + raise ValueError(f"Unable to compute absolute module for {import_node}") + return module + + +@dataclass(frozen=True) +class ModuleNameAndPackage: + name: str + package: str + + +def calculate_module_and_package( + repo_root: StrPath, filename: StrPath, use_pyproject_toml: bool = False +) -> ModuleNameAndPackage: + # Given an absolute repo_root and an absolute filename, calculate the + # python module name for the file. + if use_pyproject_toml: + # But also look for pyproject.toml files, indicating nested packages in the repo. + abs_repo_root = Path(repo_root).resolve() + abs_filename = Path(filename).resolve() + package_root = abs_filename.parent + while package_root != abs_repo_root: + if (package_root / "pyproject.toml").exists(): + break + if package_root == package_root.parent: + break + package_root = package_root.parent + + relative_filename = abs_filename.relative_to(package_root) + else: + relative_filename = PurePath(filename).relative_to(repo_root) + relative_filename = relative_filename.with_suffix("") + + # handle special cases + if relative_filename.stem in ["__init__", "__main__"]: + relative_filename = relative_filename.parent + package = name = ".".join(relative_filename.parts) + else: + name = ".".join(relative_filename.parts) + package = ".".join(relative_filename.parts[:-1]) + + return ModuleNameAndPackage(name, package) diff --git a/libcst/helpers/node_fields.py b/libcst/helpers/node_fields.py new file mode 100644 index 00000000..418d6cbb --- /dev/null +++ b/libcst/helpers/node_fields.py @@ -0,0 +1,128 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from __future__ import annotations + +import dataclasses +from typing import TYPE_CHECKING + +from libcst import IndentedBlock, Module +from libcst._nodes.deep_equals import deep_equals + +if TYPE_CHECKING: + from typing import Sequence + + from libcst import CSTNode + + +def get_node_fields(node: CSTNode) -> Sequence[dataclasses.Field[CSTNode]]: + """ + Returns the sequence of a given CST-node's fields. + """ + return dataclasses.fields(node) + + +def is_whitespace_node_field(node: CSTNode, field: dataclasses.Field[CSTNode]) -> bool: + """ + Returns True if a given CST-node's field is a whitespace-related field + (whitespace, indent, header, footer, etc.). + """ + if "whitespace" in field.name: + return True + if "leading_lines" in field.name: + return True + if "lines_after_decorators" in field.name: + return True + if isinstance(node, (IndentedBlock, Module)) and field.name in [ + "header", + "footer", + ]: + return True + if isinstance(node, IndentedBlock) and field.name == "indent": + return True + return False + + +def is_syntax_node_field(node: CSTNode, field: dataclasses.Field[CSTNode]) -> bool: + """ + Returns True if a given CST-node's field is a syntax-related field + (colon, semicolon, dot, encoding, etc.). + """ + if isinstance(node, Module) and field.name in [ + "encoding", + "default_indent", + "default_newline", + "has_trailing_newline", + ]: + return True + type_str = repr(field.type) + if ( + "Sentinel" in type_str + and field.name not in ["star_arg", "star", "posonly_ind"] + and "whitespace" not in field.name + ): + # This is a value that can optionally be specified, so its + # definitely syntax. 
+ return True + + for name in ["Semicolon", "Colon", "Comma", "Dot", "AssignEqual"]: + # These are all nodes that exist for separation syntax + if name in type_str: + return True + + return False + + +def get_field_default_value(field: dataclasses.Field[CSTNode]) -> object: + """ + Returns the default value of a CST-node's field. + """ + if field.default_factory is not dataclasses.MISSING: + # pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE, + # dataclasses._DefaultFactory[object]]` is not a function. + return field.default_factory() + return field.default + + +def is_default_node_field(node: CSTNode, field: dataclasses.Field[CSTNode]) -> bool: + """ + Returns True if a given CST-node's field has its default value. + """ + return deep_equals(getattr(node, field.name), get_field_default_value(field)) + + +def filter_node_fields( + node: CSTNode, + *, + show_defaults: bool, + show_syntax: bool, + show_whitespace: bool, +) -> Sequence[dataclasses.Field[CSTNode]]: + """ + Returns a filtered sequence of a CST-node's fields. + + Setting ``show_whitespace`` to ``False`` will filter whitespace fields. + + Setting ``show_defaults`` to ``False`` will filter fields if their value is equal to + the default value ; while respecting the value of ``show_whitespace``. + + Setting ``show_syntax`` to ``False`` will filter syntax fields ; while respecting + the value of ``show_whitespace`` & ``show_defaults``. 
+ """ + + fields: Sequence[dataclasses.Field[CSTNode]] = dataclasses.fields(node) + # Hide all fields prefixed with "_" + fields = [f for f in fields if f.name[0] != "_"] + # Filter whitespace nodes if needed + if not show_whitespace: + fields = [f for f in fields if not is_whitespace_node_field(node, f)] + # Filter values which aren't changed from their defaults + if not show_defaults: + fields = [f for f in fields if not is_default_node_field(node, f)] + # Filter out values which aren't interesting if needed + if not show_syntax: + fields = [f for f in fields if not is_syntax_node_field(node, f)] + + return fields diff --git a/libcst/helpers/paths.py b/libcst/helpers/paths.py new file mode 100644 index 00000000..00830ce9 --- /dev/null +++ b/libcst/helpers/paths.py @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import os +from contextlib import contextmanager +from pathlib import Path +from typing import Generator + +from libcst._types import StrPath + + +@contextmanager +def chdir(path: StrPath) -> Generator[Path, None, None]: + """ + Temporarily chdir to the given path, and then return to the previous path. + """ + try: + path = Path(path).resolve() + cwd = os.getcwd() + os.chdir(path) + yield path + finally: + os.chdir(cwd) diff --git a/libcst/helpers/tests/__init__.py b/libcst/helpers/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/helpers/tests/__init__.py +++ b/libcst/helpers/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/helpers/tests/test_expression.py b/libcst/helpers/tests/test_expression.py index f80e6082..2b44e14b 100644 --- a/libcst/helpers/tests/test_expression.py +++ b/libcst/helpers/tests/test_expression.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -12,7 +12,7 @@ from libcst.helpers import ( get_full_name_for_node, get_full_name_for_node_or_raise, ) -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class ExpressionTest(UnitTest): diff --git a/libcst/helpers/tests/test_matchers.py b/libcst/helpers/tests/test_matchers.py new file mode 100644 index 00000000..3f2b9b47 --- /dev/null +++ b/libcst/helpers/tests/test_matchers.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+# + +from libcst import parse_expression, parse_statement +from libcst.helpers.matchers import node_to_matcher +from libcst.matchers import matches +from libcst.testing.utils import data_provider, UnitTest + + +class MatchersTest(UnitTest): + @data_provider( + ( + ('"some string"',), + ("call(some, **kwargs)",), + ("a[b.c]",), + ("[1 for _ in range(99) if False]",), + ) + ) + def test_reflexive_expressions(self, code: str) -> None: + node = parse_expression(code) + matcher = node_to_matcher(node) + self.assertTrue(matches(node, matcher)) + + @data_provider( + ( + ("def foo(a) -> None: pass",), + ("class F: ...",), + ("foo: bar",), + ) + ) + def test_reflexive_statements(self, code: str) -> None: + node = parse_statement(code) + matcher = node_to_matcher(node) + self.assertTrue(matches(node, matcher)) + + def test_whitespace(self) -> None: + code_ws = parse_expression("(foo , bar )") + code = parse_expression("(foo,bar)") + self.assertTrue( + matches( + code, + node_to_matcher(code_ws), + ) + ) + self.assertFalse( + matches( + code, + node_to_matcher(code_ws, match_syntactic_trivia=True), + ) + ) diff --git a/libcst/helpers/tests/test_module.py b/libcst/helpers/tests/test_module.py index c5be94d5..815e1fa2 100644 --- a/libcst/helpers/tests/test_module.py +++ b/libcst/helpers/tests/test_module.py @@ -1,11 +1,24 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
# -import libcst -from libcst.helpers import insert_header_comments -from libcst.testing.utils import UnitTest +from pathlib import Path, PurePath +from typing import Any, Optional +from unittest.mock import patch + +import libcst as cst +from libcst.helpers.common import ensure_type +from libcst.helpers.module import ( + calculate_module_and_package, + get_absolute_module_for_import, + get_absolute_module_for_import_or_raise, + get_absolute_module_from_package_for_import, + get_absolute_module_from_package_for_import_or_raise, + insert_header_comments, + ModuleNameAndPackage, +) +from libcst.testing.utils import data_provider, UnitTest class ModuleTest(UnitTest): @@ -18,7 +31,7 @@ class ModuleTest(UnitTest): expected_code = "\n".join( comment_lines + inserted_comments + empty_lines + non_header_line ) - node = libcst.parse_module(original_code) + node = cst.parse_module(original_code) self.assertEqual( insert_header_comments(node, inserted_comments).code, expected_code ) @@ -26,7 +39,7 @@ class ModuleTest(UnitTest): # No comment case original_code = "\n".join(empty_lines + non_header_line) expected_code = "\n".join(inserted_comments + empty_lines + non_header_line) - node = libcst.parse_module(original_code) + node = cst.parse_module(original_code) self.assertEqual( insert_header_comments(node, inserted_comments).code, expected_code ) @@ -34,7 +47,7 @@ class ModuleTest(UnitTest): # No empty lines case original_code = "\n".join(comment_lines + non_header_line) expected_code = "\n".join(comment_lines + inserted_comments + non_header_line) - node = libcst.parse_module(original_code) + node = cst.parse_module(original_code) self.assertEqual( insert_header_comments(node, inserted_comments).code, expected_code ) @@ -45,7 +58,7 @@ class ModuleTest(UnitTest): expected_code = "\n".join( comment_lines + inserted_comments + empty_lines + non_header_line ) - node = libcst.parse_module(original_code) + node = cst.parse_module(original_code) self.assertEqual( 
insert_header_comments(node, inserted_comments).code, expected_code ) @@ -53,7 +66,285 @@ class ModuleTest(UnitTest): # No header case original_code = "\n".join(non_header_line) expected_code = "\n".join(inserted_comments + non_header_line) - node = libcst.parse_module(original_code) + node = cst.parse_module(original_code) self.assertEqual( insert_header_comments(node, inserted_comments).code, expected_code ) + + @data_provider( + ( + # Simple imports that are already absolute. + (None, "from a.b import c", "a.b"), + ("x.y.z", "from a.b import c", "a.b"), + # Relative import that can't be resolved due to missing module. + (None, "from ..w import c", None), + # Relative import that goes past the module level. + ("x", "from ...y import z", None), + ("x.y.z", "from .....w import c", None), + ("x.y.z", "from ... import c", None), + # Correct resolution of absolute from relative modules. + ("x.y.z", "from . import c", "x.y"), + ("x.y.z", "from .. import c", "x"), + ("x.y.z", "from .w import c", "x.y.w"), + ("x.y.z", "from ..w import c", "x.w"), + ("x.y.z", "from ...w import c", "w"), + ) + ) + def test_get_absolute_module( + self, + module: Optional[str], + importfrom: str, + output: Optional[str], + ) -> None: + node = ensure_type(cst.parse_statement(importfrom), cst.SimpleStatementLine) + assert len(node.body) == 1, "Unexpected number of statements!" + import_node = ensure_type(node.body[0], cst.ImportFrom) + + self.assertEqual(get_absolute_module_for_import(module, import_node), output) + if output is None: + with self.assertRaises(Exception): + get_absolute_module_for_import_or_raise(module, import_node) + else: + self.assertEqual( + get_absolute_module_for_import_or_raise(module, import_node), output + ) + + @data_provider( + ( + # Simple imports that are already absolute. 
+ (None, "from a.b import c", "a.b"), + ("x/y/z.py", "from a.b import c", "a.b"), + ("x/y/z/__init__.py", "from a.b import c", "a.b"), + # Relative import that can't be resolved due to missing module. + (None, "from ..w import c", None), + # Attempted relative import with no known parent package + ("__init__.py", "from .y import z", None), + ("x.py", "from .y import z", None), + # Relative import that goes past the module level. + ("x.py", "from ...y import z", None), + ("x/y/z.py", "from ... import c", None), + ("x/y/z.py", "from ...w import c", None), + ("x/y/z/__init__.py", "from .... import c", None), + ("x/y/z/__init__.py", "from ....w import c", None), + # Correct resolution of absolute from relative modules. + ("x/y/z.py", "from . import c", "x.y"), + ("x/y/z.py", "from .. import c", "x"), + ("x/y/z.py", "from .w import c", "x.y.w"), + ("x/y/z.py", "from ..w import c", "x.w"), + ("x/y/z/__init__.py", "from . import c", "x.y.z"), + ("x/y/z/__init__.py", "from .. import c", "x.y"), + ("x/y/z/__init__.py", "from ... import c", "x"), + ("x/y/z/__init__.py", "from .w import c", "x.y.z.w"), + ("x/y/z/__init__.py", "from ..w import c", "x.y.w"), + ("x/y/z/__init__.py", "from ...w import c", "x.w"), + ) + ) + def test_get_absolute_module_from_package( + self, + filename: Optional[str], + importfrom: str, + output: Optional[str], + ) -> None: + package = None + if filename is not None: + info = calculate_module_and_package(".", filename) + package = info.package + node = ensure_type(cst.parse_statement(importfrom), cst.SimpleStatementLine) + assert len(node.body) == 1, "Unexpected number of statements!" 
+ import_node = ensure_type(node.body[0], cst.ImportFrom) + + self.assertEqual( + get_absolute_module_from_package_for_import(package, import_node), output + ) + if output is None: + with self.assertRaises(Exception): + get_absolute_module_from_package_for_import_or_raise( + package, import_node + ) + else: + self.assertEqual( + get_absolute_module_from_package_for_import_or_raise( + package, import_node + ), + output, + ) + + @data_provider( + ( + # Nodes without an asname + (cst.ImportAlias(name=cst.Name("foo")), "foo", None), + ( + cst.ImportAlias(name=cst.Attribute(cst.Name("foo"), cst.Name("bar"))), + "foo.bar", + None, + ), + # Nodes with an asname + ( + cst.ImportAlias( + name=cst.Name("foo"), asname=cst.AsName(name=cst.Name("baz")) + ), + "foo", + "baz", + ), + ( + cst.ImportAlias( + name=cst.Attribute(cst.Name("foo"), cst.Name("bar")), + asname=cst.AsName(name=cst.Name("baz")), + ), + "foo.bar", + "baz", + ), + ) + ) + def test_importalias_helpers( + self, alias_node: cst.ImportAlias, full_name: str, alias: Optional[str] + ) -> None: + self.assertEqual(alias_node.evaluated_name, full_name) + self.assertEqual(alias_node.evaluated_alias, alias) + + @data_provider( + ( + # Various files inside the root should give back valid modules. + ( + "/home/username/root", + "/home/username/root/file.py", + ModuleNameAndPackage("file", ""), + ), + ( + "/home/username/root/", + "/home/username/root/file.py", + ModuleNameAndPackage("file", ""), + ), + ( + "/home/username/root/", + "/home/username/root/some/dir/file.py", + ModuleNameAndPackage("some.dir.file", "some.dir"), + ), + # Various special files inside the root should give back valid modules. 
+ ( + "/home/username/root/", + "/home/username/root/some/dir/__init__.py", + ModuleNameAndPackage("some.dir", "some.dir"), + ), + ( + "/home/username/root/", + "/home/username/root/some/dir/__main__.py", + ModuleNameAndPackage("some.dir", "some.dir"), + ), + ( + "c:/Program Files/", + "c:/Program Files/some/dir/file.py", + ModuleNameAndPackage("some.dir.file", "some.dir"), + ), + ( + "c:/Program Files/", + "c:/Program Files/some/dir/__main__.py", + ModuleNameAndPackage("some.dir", "some.dir"), + ), + ), + ) + def test_calculate_module_and_package( + self, + repo_root: str, + filename: str, + module_and_package: Optional[ModuleNameAndPackage], + ) -> None: + self.assertEqual( + calculate_module_and_package(repo_root, filename), module_and_package + ) + + @data_provider( + ( + ("foo/foo/__init__.py", ModuleNameAndPackage("foo", "foo")), + ("foo/foo/file.py", ModuleNameAndPackage("foo.file", "foo")), + ( + "foo/foo/sub/subfile.py", + ModuleNameAndPackage("foo.sub.subfile", "foo.sub"), + ), + ("libs/bar/bar/thing.py", ModuleNameAndPackage("bar.thing", "bar")), + ( + "noproj/some/file.py", + ModuleNameAndPackage("noproj.some.file", "noproj.some"), + ), + ) + ) + def test_calculate_module_and_package_using_pyproject_toml( + self, + rel_path: str, + module_and_package: Optional[ModuleNameAndPackage], + ) -> None: + mock_tree: dict[str, Any] = { + "home": { + "user": { + "root": { + "foo": { + "pyproject.toml": "content", + "foo": { + "__init__.py": "content", + "file.py": "content", + "sub": { + "subfile.py": "content", + }, + }, + }, + "libs": { + "bar": { + "pyproject.toml": "content", + "bar": { + "__init__.py": "content", + "thing.py": "content", + }, + } + }, + "noproj": { + "some": { + "file.py": "content", + } + }, + }, + }, + }, + } + repo_root = Path("/home/user/root").resolve() + fake_root: Path = repo_root.parent.parent.parent + + def mock_exists(path: PurePath) -> bool: + parts = path.relative_to(fake_root).parts + subtree = mock_tree + for part in parts: + 
if (subtree := subtree.get(part)) is None: + return False + return True + + with patch("pathlib.Path.exists", new=mock_exists): + self.assertEqual( + calculate_module_and_package( + repo_root, repo_root / rel_path, use_pyproject_toml=True + ), + module_and_package, + ) + + @data_provider( + ( + # Providing a file outside the root should raise an exception + ("/home/username/root", "/some/dummy/file.py"), + ("/home/username/root/", "/some/dummy/file.py"), + ("/home/username/root", "/home/username/file.py"), + # some windows tests + ( + "c:/Program Files/", + "d:/Program Files/some/dir/file.py", + ), + ( + "c:/Program Files/other/", + "c:/Program Files/some/dir/file.py", + ), + ) + ) + def test_invalid_module_and_package( + self, + repo_root: str, + filename: str, + ) -> None: + with self.assertRaises(ValueError): + calculate_module_and_package(repo_root, filename) diff --git a/libcst/helpers/tests/test_node_fields.py b/libcst/helpers/tests/test_node_fields.py new file mode 100644 index 00000000..61d5ec21 --- /dev/null +++ b/libcst/helpers/tests/test_node_fields.py @@ -0,0 +1,314 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from unittest import TestCase + +from libcst import ( + Annotation, + CSTNode, + FunctionDef, + IndentedBlock, + Module, + Param, + parse_module, + Pass, + Semicolon, + SimpleStatementLine, +) + +from libcst.helpers import ( + get_node_fields, + is_default_node_field, + is_syntax_node_field, + is_whitespace_node_field, +) + + +class _NodeFieldsTest(TestCase): + """Node fields related tests.""" + + module: Module + annotation: Annotation + param: Param + _pass: Pass + semicolon: Semicolon + statement: SimpleStatementLine + indent: IndentedBlock + function: FunctionDef + + @classmethod + def setUpClass(cls) -> None: + """Parse a simple CST and references interesting nodes.""" + cls.module = parse_module( + "def foo(a: str) -> None:\n pass ; pass\n return\n" + ) + # /!\ Direct access to nodes + # This is done for test purposes on a known CST + # -> For "real code", use visitors to do this "the correct way" + + # pyre-ignore[8]: direct access for tests + cls.function = cls.module.body[0] + cls.param = cls.function.params.params[0] + # pyre-ignore[8]: direct access for tests + cls.annotation = cls.param.annotation + # pyre-ignore[8]: direct access for tests + cls.indent = cls.function.body + # pyre-ignore[8]: direct access for tests + cls.statement = cls.indent.body[0] + # pyre-ignore[8]: direct access for tests + cls._pass = cls.statement.body[0] + # pyre-ignore[8]: direct access for tests + cls.semicolon = cls.statement.body[0].semicolon + + def test__cst_correctness(self) -> None: + """Test that the CST is correctly parsed.""" + self.assertIsInstance(self.module, Module) + self.assertIsInstance(self.annotation, Annotation) + self.assertIsInstance(self.param, Param) + self.assertIsInstance(self._pass, Pass) + self.assertIsInstance(self.semicolon, Semicolon) + self.assertIsInstance(self.statement, SimpleStatementLine) + self.assertIsInstance(self.indent, IndentedBlock) + self.assertIsInstance(self.function, FunctionDef) + + +class 
IsWhitespaceNodeFieldTest(_NodeFieldsTest): + """``is_whitespace_node_field`` tests.""" + + def _check_fields(self, is_filtered_field: dict[str, bool], node: CSTNode) -> None: + fields = get_node_fields(node) + self.assertEqual(len(is_filtered_field), len(fields)) + for field in fields: + self.assertEqual( + is_filtered_field[field.name], + is_whitespace_node_field(node, field), + f"Node ``{node.__class__.__qualname__}`` field '{field.name}' " + f"{'should have' if is_filtered_field[field.name] else 'should not have'} " + "been filtered by ``is_whitespace_node_field``", + ) + + def test_module(self) -> None: + """Check if a CST Module node is correctly filtered.""" + is_filtered_field = { + "body": False, + "header": True, + "footer": True, + "encoding": False, + "default_indent": False, + "default_newline": False, + "has_trailing_newline": False, + } + self._check_fields(is_filtered_field, self.module) + + def test_annotation(self) -> None: + """Check if a CST Annotation node is correctly filtered.""" + is_filtered_field = { + "annotation": False, + "whitespace_before_indicator": True, + "whitespace_after_indicator": True, + } + self._check_fields(is_filtered_field, self.annotation) + + def test_param(self) -> None: + """Check if a CST Param node is correctly filtered.""" + is_filtered_field = { + "name": False, + "annotation": False, + "equal": False, + "default": False, + "comma": False, + "star": False, + "whitespace_after_star": True, + "whitespace_after_param": True, + } + self._check_fields(is_filtered_field, self.param) + + def test_semicolon(self) -> None: + """Check if a CST Semicolon node is correctly filtered.""" + is_filtered_field = { + "whitespace_before": True, + "whitespace_after": True, + } + self._check_fields(is_filtered_field, self.semicolon) + + def test_statement(self) -> None: + """Check if a CST SimpleStatementLine node is correctly filtered.""" + is_filtered_field = { + "body": False, + "leading_lines": True, + "trailing_whitespace": True, 
+ } + self._check_fields(is_filtered_field, self.statement) + + def test_indent(self) -> None: + """Check if a CST IndentedBlock node is correctly filtered.""" + is_filtered_field = { + "body": False, + "header": True, + "indent": True, + "footer": True, + } + self._check_fields(is_filtered_field, self.indent) + + def test_function(self) -> None: + """Check if a CST FunctionDef node is correctly filtered.""" + is_filtered_field = { + "name": False, + "params": False, + "body": False, + "decorators": False, + "returns": False, + "asynchronous": False, + "leading_lines": True, + "lines_after_decorators": True, + "whitespace_after_def": True, + "whitespace_after_name": True, + "whitespace_before_params": True, + "whitespace_before_colon": True, + "type_parameters": False, + "whitespace_after_type_parameters": True, + } + self._check_fields(is_filtered_field, self.function) + + +class IsSyntaxNodeFieldTest(_NodeFieldsTest): + """``is_syntax_node_field`` tests.""" + + def _check_fields(self, is_filtered_field: dict[str, bool], node: CSTNode) -> None: + fields = get_node_fields(node) + self.assertEqual(len(is_filtered_field), len(fields)) + for field in fields: + self.assertEqual( + is_filtered_field[field.name], + is_syntax_node_field(node, field), + f"Node ``{node.__class__.__qualname__}`` field '{field.name}' " + f"{'should have' if is_filtered_field[field.name] else 'should not have'} " + "been filtered by ``is_syntax_node_field``", + ) + + def test_module(self) -> None: + """Check if a CST Module node is correctly filtered.""" + is_filtered_field = { + "body": False, + "header": False, + "footer": False, + "encoding": True, + "default_indent": True, + "default_newline": True, + "has_trailing_newline": True, + } + self._check_fields(is_filtered_field, self.module) + + def test_param(self) -> None: + """Check if a CST Param node is correctly filtered.""" + is_filtered_field = { + "name": False, + "annotation": False, + "equal": True, + "default": False, + "comma": 
True, + "star": False, + "whitespace_after_star": False, + "whitespace_after_param": False, + } + self._check_fields(is_filtered_field, self.param) + + def test_pass(self) -> None: + """Check if a CST Pass node is correctly filtered.""" + is_filtered_field = { + "semicolon": True, + } + self._check_fields(is_filtered_field, self._pass) + + +class IsDefaultNodeFieldTest(_NodeFieldsTest): + """``is_default_node_field`` tests.""" + + def _check_fields(self, is_filtered_field: dict[str, bool], node: CSTNode) -> None: + fields = get_node_fields(node) + self.assertEqual(len(is_filtered_field), len(fields)) + for field in fields: + self.assertEqual( + is_filtered_field[field.name], + is_default_node_field(node, field), + f"Node ``{node.__class__.__qualname__}`` field '{field.name}' " + f"{'should have' if is_filtered_field[field.name] else 'should not have'} " + "been filtered by ``is_default_node_field``", + ) + + def test_module(self) -> None: + """Check if a CST Module node is correctly filtered.""" + is_filtered_field = { + "body": False, + "header": True, + "footer": True, + "encoding": True, + "default_indent": True, + "default_newline": True, + "has_trailing_newline": True, + } + self._check_fields(is_filtered_field, self.module) + + def test_annotation(self) -> None: + """Check if a CST Annotation node is correctly filtered.""" + is_filtered_field = { + "annotation": False, + "whitespace_before_indicator": False, + "whitespace_after_indicator": True, + } + self._check_fields(is_filtered_field, self.annotation) + + def test_param(self) -> None: + """Check if a CST Param node is correctly filtered.""" + is_filtered_field = { + "name": False, + "annotation": False, + "equal": True, + "default": True, + "comma": True, + "star": False, + "whitespace_after_star": True, + "whitespace_after_param": True, + } + self._check_fields(is_filtered_field, self.param) + + def test_statement(self) -> None: + """Check if a CST SimpleStatementLine node is correctly filtered.""" + 
is_filtered_field = { + "body": False, + "leading_lines": True, + "trailing_whitespace": True, + } + self._check_fields(is_filtered_field, self.statement) + + def test_indent(self) -> None: + """Check if a CST IndentedBlock node is correctly filtered.""" + is_filtered_field = { + "body": False, + "header": True, + "indent": True, + "footer": True, + } + self._check_fields(is_filtered_field, self.indent) + + def test_function(self) -> None: + """Check if a CST FunctionDef node is correctly filtered.""" + is_filtered_field = { + "name": False, + "params": False, + "body": False, + "decorators": True, + "returns": False, + "asynchronous": True, + "leading_lines": True, + "lines_after_decorators": True, + "whitespace_after_def": True, + "whitespace_after_name": True, + "whitespace_before_params": True, + "whitespace_before_colon": True, + "type_parameters": True, + "whitespace_after_type_parameters": True, + } + self._check_fields(is_filtered_field, self.function) diff --git a/libcst/helpers/tests/test_paths.py b/libcst/helpers/tests/test_paths.py new file mode 100644 index 00000000..c36273d4 --- /dev/null +++ b/libcst/helpers/tests/test_paths.py @@ -0,0 +1,58 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from pathlib import Path +from tempfile import TemporaryDirectory + +from libcst.helpers.paths import chdir +from libcst.testing.utils import UnitTest + + +class PathsTest(UnitTest): + def test_chdir(self) -> None: + with TemporaryDirectory() as td: + tdp = Path(td).resolve() + inner = tdp / "foo" / "bar" + inner.mkdir(parents=True) + + with self.subTest("string paths"): + cwd1 = Path.cwd() + + with chdir(tdp.as_posix()) as path2: + cwd2 = Path.cwd() + self.assertEqual(tdp, cwd2) + self.assertEqual(tdp, path2) + + with chdir(inner.as_posix()) as path3: + cwd3 = Path.cwd() + self.assertEqual(inner, cwd3) + self.assertEqual(inner, path3) + + cwd4 = Path.cwd() + self.assertEqual(tdp, cwd4) + self.assertEqual(cwd2, cwd4) + + cwd5 = Path.cwd() + self.assertEqual(cwd1, cwd5) + + with self.subTest("pathlib objects"): + cwd1 = Path.cwd() + + with chdir(tdp) as path2: + cwd2 = Path.cwd() + self.assertEqual(tdp, cwd2) + self.assertEqual(tdp, path2) + + with chdir(inner) as path3: + cwd3 = Path.cwd() + self.assertEqual(inner, cwd3) + self.assertEqual(inner, path3) + + cwd4 = Path.cwd() + self.assertEqual(tdp, cwd4) + self.assertEqual(cwd2, cwd4) + + cwd5 = Path.cwd() + self.assertEqual(cwd1, cwd5) diff --git a/libcst/helpers/tests/test_statement.py b/libcst/helpers/tests/test_statement.py deleted file mode 100644 index 4d2009d1..00000000 --- a/libcst/helpers/tests/test_statement.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# -from typing import Optional - -import libcst as cst -from libcst.helpers import ( - ensure_type, - get_absolute_module_for_import, - get_absolute_module_for_import_or_raise, -) -from libcst.testing.utils import UnitTest, data_provider - - -class StatementTest(UnitTest): - @data_provider( - ( - # Simple imports that are already absolute. 
- (None, "from a.b import c", "a.b"), - ("x.y.z", "from a.b import c", "a.b"), - # Relative import that can't be resolved due to missing module. - (None, "from ..w import c", None), - # Relative import that goes past the module level. - ("x", "from ...y import z", None), - ("x.y.z", "from .....w import c", None), - ("x.y.z", "from ... import c", None), - # Correct resolution of absolute from relative modules. - ("x.y.z", "from . import c", "x.y"), - ("x.y.z", "from .. import c", "x"), - ("x.y.z", "from .w import c", "x.y.w"), - ("x.y.z", "from ..w import c", "x.w"), - ("x.y.z", "from ...w import c", "w"), - ) - ) - def test_get_absolute_module( - self, - module: Optional[str], - importfrom: str, - output: Optional[str], - ) -> None: - node = ensure_type(cst.parse_statement(importfrom), cst.SimpleStatementLine) - assert len(node.body) == 1, "Unexpected number of statements!" - import_node = ensure_type(node.body[0], cst.ImportFrom) - - self.assertEqual(get_absolute_module_for_import(module, import_node), output) - if output is None: - with self.assertRaises(Exception): - get_absolute_module_for_import_or_raise(module, import_node) - else: - self.assertEqual( - get_absolute_module_for_import_or_raise(module, import_node), output - ) - - @data_provider( - ( - # Nodes without an asname - (cst.ImportAlias(name=cst.Name("foo")), "foo", None), - ( - cst.ImportAlias(name=cst.Attribute(cst.Name("foo"), cst.Name("bar"))), - "foo.bar", - None, - ), - # Nodes with an asname - ( - cst.ImportAlias( - name=cst.Name("foo"), asname=cst.AsName(name=cst.Name("baz")) - ), - "foo", - "baz", - ), - ( - cst.ImportAlias( - name=cst.Attribute(cst.Name("foo"), cst.Name("bar")), - asname=cst.AsName(name=cst.Name("baz")), - ), - "foo.bar", - "baz", - ), - ) - ) - def test_importalias_helpers( - self, alias_node: cst.ImportAlias, full_name: str, alias: Optional[str] - ) -> None: - self.assertEqual(alias_node.evaluated_name, full_name) - self.assertEqual(alias_node.evaluated_alias, alias) diff 
--git a/libcst/helpers/tests/test_template.py b/libcst/helpers/tests/test_template.py index 1805fb51..cef82dde 100644 --- a/libcst/helpers/tests/test_template.py +++ b/libcst/helpers/tests/test_template.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/matchers/__init__.py b/libcst/matchers/__init__.py index 73b3e7f2..2857fee1 100644 --- a/libcst/matchers/__init__.py +++ b/libcst/matchers/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,12 +6,11 @@ # This file was generated by libcst.codegen.gen_matcher_classes from dataclasses import dataclass -from typing import Callable, Sequence, Union - -from typing_extensions import Literal +from typing import Literal, Optional, Sequence, Union import libcst as cst from libcst.matchers._decorators import call_if_inside, call_if_not_inside, leave, visit + from libcst.matchers._matcher_base import ( AbstractBaseMatcherNodeMeta, AllOf, @@ -21,20 +20,20 @@ from libcst.matchers._matcher_base import ( DoesNotMatch, DoNotCare, DoNotCareSentinel, + extract, + extractall, + findall, + matches, MatchIfTrue, MatchMetadata, MatchMetadataIfTrue, MatchRegex, OneOf, + replace, SaveMatchedNode, TypeOf, ZeroOrMore, ZeroOrOne, - extract, - extractall, - findall, - matches, - replace, ) from libcst.matchers._visitors import ( MatchDecoratorMismatch, @@ -143,6 +142,10 @@ class BaseSuite(_NodeABC): pass +class BaseTemplatedStringContent(_NodeABC): + pass + + class BaseUnaryOp(_NodeABC): pass @@ -153,7 +156,7 @@ MetadataMatchType = Union[MatchMetadata, MatchMetadataIfTrue] BaseParenthesizableWhitespaceMatchType = 
Union[ "BaseParenthesizableWhitespace", MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseParenthesizableWhitespace], bool]], + MatchIfTrue[cst.BaseParenthesizableWhitespace], ] @@ -226,23 +229,15 @@ class And(BaseBooleanOp, BaseMatcherNode): BaseAssignTargetExpressionMatchType = Union[ "BaseAssignTargetExpression", MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseAssignTargetExpression], bool]], + MatchIfTrue[cst.BaseAssignTargetExpression], ] AnnotationMatchType = Union[ - "Annotation", MetadataMatchType, MatchIfTrue[Callable[[cst.Annotation], bool]] -] -BaseExpressionOrNoneMatchType = Union[ - "BaseExpression", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.BaseExpression, None]], bool]], + "Annotation", MetadataMatchType, MatchIfTrue[cst.Annotation] ] AssignEqualMatchType = Union[ - "AssignEqual", MetadataMatchType, MatchIfTrue[Callable[[cst.AssignEqual], bool]] -] -SemicolonMatchType = Union[ - "Semicolon", MetadataMatchType, MatchIfTrue[Callable[[cst.Semicolon], bool]] + "AssignEqual", MetadataMatchType, MatchIfTrue[cst.AssignEqual] ] +SemicolonMatchType = Union["Semicolon", MetadataMatchType, MatchIfTrue[cst.Semicolon]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -260,10 +255,24 @@ class AnnAssign(BaseSmallStatement, BaseMatcherNode): AllOf[AnnotationMatchType], ] = DoNotCare() value: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() equal: Union[ AssignEqualMatchType, @@ -286,9 +295,7 @@ class AnnAssign(BaseSmallStatement, BaseMatcherNode): BaseExpressionMatchType = Union[ - 
"BaseExpression", - MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseExpression], bool]], + "BaseExpression", MetadataMatchType, MatchIfTrue[cst.BaseExpression] ] @@ -320,15 +327,7 @@ class Annotation(BaseMatcherNode): ] = DoNotCare() -NameOrNoneMatchType = Union[ - "Name", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Name, None]], bool]], -] -CommaMatchType = Union[ - "Comma", MetadataMatchType, MatchIfTrue[Callable[[cst.Comma], bool]] -] +CommaMatchType = Union["Comma", MetadataMatchType, MatchIfTrue[cst.Comma]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -340,10 +339,16 @@ class Arg(BaseMatcherNode): AllOf[BaseExpressionMatchType], ] = DoNotCare() keyword: Union[ - NameOrNoneMatchType, + Optional["Name"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Name]], DoNotCareSentinel, - OneOf[NameOrNoneMatchType], - AllOf[NameOrNoneMatchType], + OneOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + AllOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], ] = DoNotCare() equal: Union[ AssignEqualMatchType, @@ -357,20 +362,20 @@ class Arg(BaseMatcherNode): star: Union[ Literal["", "*", "**"], MetadataMatchType, - MatchIfTrue[Callable[[Literal["", "*", "**"]], bool]], + MatchIfTrue[Literal["", "*", "**"]], DoNotCareSentinel, OneOf[ Union[ Literal["", "*", "**"], MetadataMatchType, - MatchIfTrue[Callable[[Literal["", "*", "**"]], bool]], + MatchIfTrue[Literal["", "*", "**"]], ] ], AllOf[ Union[ Literal["", "*", "**"], MetadataMatchType, - MatchIfTrue[Callable[[Literal["", "*", "**"]], bool]], + MatchIfTrue[Literal["", "*", "**"]], ] ], ] = DoNotCare() @@ -399,7 +404,7 @@ NameOrTupleOrListMatchType = Union[ "Tuple", "List", MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Name, cst.Tuple, cst.List]], bool]], + MatchIfTrue[Union[cst.Name, cst.Tuple, cst.List]], ] @@ -432,9 +437,7 @@ class AsName(BaseMatcherNode): SimpleWhitespaceMatchType = Union[ - "SimpleWhitespace", 
- MetadataMatchType, - MatchIfTrue[Callable[[cst.SimpleWhitespace], bool]], + "SimpleWhitespace", MetadataMatchType, MatchIfTrue[cst.SimpleWhitespace] ] @@ -447,10 +450,24 @@ class Assert(BaseSmallStatement, BaseMatcherNode): AllOf[BaseExpressionMatchType], ] = DoNotCare() msg: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] @@ -476,7 +493,7 @@ class Assert(BaseSmallStatement, BaseMatcherNode): AssignTargetMatchType = Union[ - "AssignTarget", MetadataMatchType, MatchIfTrue[Callable[[cst.AssignTarget], bool]] + "AssignTarget", MetadataMatchType, MatchIfTrue[cst.AssignTarget] ] @@ -508,7 +525,7 @@ class Assign(BaseSmallStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.AssignTarget]], bool]], + MatchIfTrue[Sequence[cst.AssignTarget]], OneOf[ Union[ Sequence[ @@ -532,7 +549,7 @@ class Assign(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.AssignTarget]], bool]], + MatchIfTrue[Sequence[cst.AssignTarget]], ] ], AllOf[ @@ -558,7 +575,7 @@ class Assign(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.AssignTarget]], bool]], + MatchIfTrue[Sequence[cst.AssignTarget]], ] ], ] = DoNotCare() @@ -648,15 +665,11 @@ class Asynchronous(BaseMatcherNode): ] = DoNotCare() -NameMatchType = Union[ - "Name", MetadataMatchType, MatchIfTrue[Callable[[cst.Name], bool]] -] -DotMatchType = Union["Dot", MetadataMatchType, MatchIfTrue[Callable[[cst.Dot], 
bool]]] -LeftParenMatchType = Union[ - "LeftParen", MetadataMatchType, MatchIfTrue[Callable[[cst.LeftParen], bool]] -] +NameMatchType = Union["Name", MetadataMatchType, MatchIfTrue[cst.Name]] +DotMatchType = Union["Dot", MetadataMatchType, MatchIfTrue[cst.Dot]] +LeftParenMatchType = Union["LeftParen", MetadataMatchType, MatchIfTrue[cst.LeftParen]] RightParenMatchType = Union[ - "RightParen", MetadataMatchType, MatchIfTrue[Callable[[cst.RightParen], bool]] + "RightParen", MetadataMatchType, MatchIfTrue[cst.RightParen] ] @@ -702,7 +715,7 @@ class Attribute( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -726,7 +739,7 @@ class Attribute( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -752,7 +765,7 @@ class Attribute( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -782,7 +795,7 @@ class Attribute( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -806,7 +819,7 @@ class Attribute( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -832,7 +845,7 @@ class Attribute( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -844,9 +857,7 @@ class Attribute( ] = DoNotCare() -BaseAugOpMatchType = Union[ - "BaseAugOp", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseAugOp], bool]] -] +BaseAugOpMatchType = Union["BaseAugOp", MetadataMatchType, MatchIfTrue[cst.BaseAugOp]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -917,7 +928,7 @@ class Await(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + 
MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -941,7 +952,7 @@ class Await(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -967,7 +978,7 @@ class Await(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -997,7 +1008,7 @@ class Await(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -1021,7 +1032,7 @@ class Await(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -1047,7 +1058,7 @@ class Await(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -1066,7 +1077,7 @@ class Await(BaseExpression, BaseMatcherNode): BaseBinaryOpMatchType = Union[ - "BaseBinaryOp", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseBinaryOp], bool]] + "BaseBinaryOp", MetadataMatchType, MatchIfTrue[cst.BaseBinaryOp] ] @@ -1116,7 +1127,7 @@ class BinaryOperation(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -1140,7 +1151,7 @@ class BinaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -1166,7 +1177,7 @@ class BinaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -1196,7 +1207,7 @@ class BinaryOperation(BaseExpression, BaseMatcherNode): ] ], 
DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -1220,7 +1231,7 @@ class BinaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -1246,7 +1257,7 @@ class BinaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -1407,7 +1418,7 @@ class BitXorAssign(BaseAugOp, BaseMatcherNode): BaseBooleanOpMatchType = Union[ - "BaseBooleanOp", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseBooleanOp], bool]] + "BaseBooleanOp", MetadataMatchType, MatchIfTrue[cst.BaseBooleanOp] ] @@ -1457,7 +1468,7 @@ class BooleanOperation(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -1481,7 +1492,7 @@ class BooleanOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -1507,7 +1518,7 @@ class BooleanOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -1537,7 +1548,7 @@ class BooleanOperation(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -1561,7 +1572,7 @@ class BooleanOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -1587,7 +1598,7 @@ class BooleanOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], 
bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -1615,7 +1626,7 @@ class Break(BaseSmallStatement, BaseMatcherNode): ] = DoNotCare() -ArgMatchType = Union["Arg", MetadataMatchType, MatchIfTrue[Callable[[cst.Arg], bool]]] +ArgMatchType = Union["Arg", MetadataMatchType, MatchIfTrue[cst.Arg]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -1652,7 +1663,7 @@ class Call(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], OneOf[ Union[ Sequence[ @@ -1672,7 +1683,7 @@ class Call(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], ] ], AllOf[ @@ -1694,7 +1705,7 @@ class Call(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], ] ], ] = DoNotCare() @@ -1724,7 +1735,7 @@ class Call(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -1748,7 +1759,7 @@ class Call(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -1774,7 +1785,7 @@ class Call(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -1804,7 +1815,7 @@ class Call(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -1828,7 +1839,7 @@ class Call(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -1854,7 +1865,7 @@ class Call(BaseExpression, BaseMatcherNode): ], ] ], - 
MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -1878,15 +1889,9 @@ class Call(BaseExpression, BaseMatcherNode): ] = DoNotCare() -BaseSuiteMatchType = Union[ - "BaseSuite", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseSuite], bool]] -] -DecoratorMatchType = Union[ - "Decorator", MetadataMatchType, MatchIfTrue[Callable[[cst.Decorator], bool]] -] -EmptyLineMatchType = Union[ - "EmptyLine", MetadataMatchType, MatchIfTrue[Callable[[cst.EmptyLine], bool]] -] +BaseSuiteMatchType = Union["BaseSuite", MetadataMatchType, MatchIfTrue[cst.BaseSuite]] +DecoratorMatchType = Union["Decorator", MetadataMatchType, MatchIfTrue[cst.Decorator]] +EmptyLineMatchType = Union["EmptyLine", MetadataMatchType, MatchIfTrue[cst.EmptyLine]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -1926,7 +1931,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], OneOf[ Union[ Sequence[ @@ -1946,7 +1951,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], ] ], AllOf[ @@ -1968,7 +1973,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], ] ], ] = DoNotCare() @@ -1998,7 +2003,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], OneOf[ Union[ Sequence[ @@ -2018,7 +2023,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], ] ], AllOf[ @@ -2040,7 +2045,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, 
BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Arg]], bool]], + MatchIfTrue[Sequence[cst.Arg]], ] ], ] = DoNotCare() @@ -2070,7 +2075,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Decorator]], bool]], + MatchIfTrue[Sequence[cst.Decorator]], OneOf[ Union[ Sequence[ @@ -2094,7 +2099,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Decorator]], bool]], + MatchIfTrue[Sequence[cst.Decorator]], ] ], AllOf[ @@ -2120,7 +2125,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Decorator]], bool]], + MatchIfTrue[Sequence[cst.Decorator]], ] ], ] = DoNotCare() @@ -2162,7 +2167,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -2186,7 +2191,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -2212,7 +2217,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -2242,7 +2247,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -2266,7 +2271,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -2292,7 +2297,7 @@ class ClassDef(BaseCompoundStatement, BaseStatement, 
BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -2314,6 +2319,32 @@ class ClassDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() + type_parameters: Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + ] + ], + AllOf[ + Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + ] + ], + ] = DoNotCare() + whitespace_after_type_parameters: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, @@ -2366,7 +2397,7 @@ class Comma(BaseMatcherNode): ] = DoNotCare() -strMatchType = Union[str, MetadataMatchType, MatchIfTrue[Callable[[str], bool]]] +strMatchType = Union[str, MetadataMatchType, MatchIfTrue[str]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -2382,21 +2413,7 @@ class Comment(BaseMatcherNode): ] = DoNotCare() -CompIfMatchType = Union[ - "CompIf", MetadataMatchType, MatchIfTrue[Callable[[cst.CompIf], bool]] -] -CompForOrNoneMatchType = Union[ - "CompFor", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.CompFor, None]], bool]], -] -AsynchronousOrNoneMatchType = Union[ - "Asynchronous", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Asynchronous, None]], bool]], -] +CompIfMatchType = Union["CompIf", MetadataMatchType, MatchIfTrue[cst.CompIf]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -2439,7 +2456,7 @@ class CompFor(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.CompIf]], bool]], + MatchIfTrue[Sequence[cst.CompIf]], 
OneOf[ Union[ Sequence[ @@ -2463,7 +2480,7 @@ class CompFor(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.CompIf]], bool]], + MatchIfTrue[Sequence[cst.CompIf]], ] ], AllOf[ @@ -2489,21 +2506,49 @@ class CompFor(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.CompIf]], bool]], + MatchIfTrue[Sequence[cst.CompIf]], ] ], ] = DoNotCare() inner_for_in: Union[ - CompForOrNoneMatchType, + Optional["CompFor"], + MetadataMatchType, + MatchIfTrue[Optional[cst.CompFor]], DoNotCareSentinel, - OneOf[CompForOrNoneMatchType], - AllOf[CompForOrNoneMatchType], + OneOf[ + Union[ + Optional["CompFor"], + MetadataMatchType, + MatchIfTrue[Optional[cst.CompFor]], + ] + ], + AllOf[ + Union[ + Optional["CompFor"], + MetadataMatchType, + MatchIfTrue[Optional[cst.CompFor]], + ] + ], ] = DoNotCare() asynchronous: Union[ - AsynchronousOrNoneMatchType, + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, - OneOf[AsynchronousOrNoneMatchType], - AllOf[AsynchronousOrNoneMatchType], + OneOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], + AllOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], ] = DoNotCare() whitespace_before: Union[ BaseParenthesizableWhitespaceMatchType, @@ -2566,9 +2611,7 @@ class CompIf(BaseMatcherNode): ComparisonTargetMatchType = Union[ - "ComparisonTarget", - MetadataMatchType, - MatchIfTrue[Callable[[cst.ComparisonTarget], bool]], + "ComparisonTarget", MetadataMatchType, MatchIfTrue[cst.ComparisonTarget] ] @@ -2606,7 +2649,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.ComparisonTarget]], bool]], + MatchIfTrue[Sequence[cst.ComparisonTarget]], OneOf[ Union[ Sequence[ @@ -2630,7 +2673,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ], ] ], - 
MatchIfTrue[Callable[[Sequence[cst.ComparisonTarget]], bool]], + MatchIfTrue[Sequence[cst.ComparisonTarget]], ] ], AllOf[ @@ -2656,7 +2699,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ComparisonTarget]], bool]], + MatchIfTrue[Sequence[cst.ComparisonTarget]], ] ], ] = DoNotCare() @@ -2686,7 +2729,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -2710,7 +2753,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -2736,7 +2779,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -2766,7 +2809,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -2790,7 +2833,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -2816,7 +2859,7 @@ class Comparison(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -2829,7 +2872,7 @@ class Comparison(BaseExpression, BaseMatcherNode): BaseCompOpMatchType = Union[ - "BaseCompOp", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseCompOp], bool]] + "BaseCompOp", MetadataMatchType, MatchIfTrue[cst.BaseCompOp] ] @@ -2859,18 +2902,14 @@ SimpleStringOrFormattedStringMatchType = Union[ "SimpleString", "FormattedString", MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.SimpleString, 
cst.FormattedString]], bool]], + MatchIfTrue[Union[cst.SimpleString, cst.FormattedString]], ] SimpleStringOrFormattedStringOrConcatenatedStringMatchType = Union[ "SimpleString", "FormattedString", "ConcatenatedString", MetadataMatchType, - MatchIfTrue[ - Callable[ - [Union[cst.SimpleString, cst.FormattedString, cst.ConcatenatedString]], bool - ] - ], + MatchIfTrue[Union[cst.SimpleString, cst.FormattedString, cst.ConcatenatedString]], ] @@ -2914,7 +2953,7 @@ class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -2938,7 +2977,7 @@ class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -2964,7 +3003,7 @@ class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -2994,7 +3033,7 @@ class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -3018,7 +3057,7 @@ class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -3044,7 +3083,7 @@ class ConcatenatedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -3078,27 +3117,18 @@ class Continue(BaseSmallStatement, BaseMatcherNode): ] = DoNotCare() -NameOrAttributeOrCallMatchType = Union[ - "Name", - "Attribute", - "Call", - MetadataMatchType, - 
MatchIfTrue[Callable[[Union[cst.Name, cst.Attribute, cst.Call]], bool]], -] TrailingWhitespaceMatchType = Union[ - "TrailingWhitespace", - MetadataMatchType, - MatchIfTrue[Callable[[cst.TrailingWhitespace], bool]], + "TrailingWhitespace", MetadataMatchType, MatchIfTrue[cst.TrailingWhitespace] ] @dataclass(frozen=True, eq=False, unsafe_hash=False) class Decorator(BaseMatcherNode): decorator: Union[ - NameOrAttributeOrCallMatchType, + BaseExpressionMatchType, DoNotCareSentinel, - OneOf[NameOrAttributeOrCallMatchType], - AllOf[NameOrAttributeOrCallMatchType], + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -3126,7 +3156,7 @@ class Decorator(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -3150,7 +3180,7 @@ class Decorator(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -3176,7 +3206,7 @@ class Decorator(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -3203,7 +3233,7 @@ class Decorator(BaseMatcherNode): BaseDelTargetExpressionMatchType = Union[ "BaseDelTargetExpression", MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseDelTargetExpression], bool]], + MatchIfTrue[cst.BaseDelTargetExpression], ] @@ -3236,19 +3266,13 @@ class Del(BaseSmallStatement, BaseMatcherNode): BaseDictElementMatchType = Union[ - "BaseDictElement", - MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseDictElement], bool]], + "BaseDictElement", MetadataMatchType, MatchIfTrue[cst.BaseDictElement] ] LeftCurlyBraceMatchType = Union[ - "LeftCurlyBrace", - MetadataMatchType, - MatchIfTrue[Callable[[cst.LeftCurlyBrace], bool]], + "LeftCurlyBrace", MetadataMatchType, MatchIfTrue[cst.LeftCurlyBrace] ] RightCurlyBraceMatchType = 
Union[ - "RightCurlyBrace", - MetadataMatchType, - MatchIfTrue[Callable[[cst.RightCurlyBrace], bool]], + "RightCurlyBrace", MetadataMatchType, MatchIfTrue[cst.RightCurlyBrace] ] @@ -3280,7 +3304,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseDictElement]], bool]], + MatchIfTrue[Sequence[cst.BaseDictElement]], OneOf[ Union[ Sequence[ @@ -3304,7 +3328,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseDictElement]], bool]], + MatchIfTrue[Sequence[cst.BaseDictElement]], ] ], AllOf[ @@ -3330,7 +3354,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseDictElement]], bool]], + MatchIfTrue[Sequence[cst.BaseDictElement]], ] ], ] = DoNotCare() @@ -3372,7 +3396,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -3396,7 +3420,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -3422,7 +3446,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -3452,7 +3476,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -3476,7 +3500,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -3502,7 +3526,7 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - 
MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -3514,24 +3538,22 @@ class Dict(BaseDict, BaseExpression, BaseMatcherNode): ] = DoNotCare() -CompForMatchType = Union[ - "CompFor", MetadataMatchType, MatchIfTrue[Callable[[cst.CompFor], bool]] -] +CompForMatchType = Union["CompFor", MetadataMatchType, MatchIfTrue[cst.CompFor]] @dataclass(frozen=True, eq=False, unsafe_hash=False) class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): key: Union[ - BaseAssignTargetExpressionMatchType, + BaseExpressionMatchType, DoNotCareSentinel, - OneOf[BaseAssignTargetExpressionMatchType], - AllOf[BaseAssignTargetExpressionMatchType], + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], ] = DoNotCare() value: Union[ - BaseAssignTargetExpressionMatchType, + BaseExpressionMatchType, DoNotCareSentinel, - OneOf[BaseAssignTargetExpressionMatchType], - AllOf[BaseAssignTargetExpressionMatchType], + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, @@ -3577,7 +3599,7 @@ class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -3601,7 +3623,7 @@ class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -3627,7 +3649,7 @@ class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -3657,7 +3679,7 @@ class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ 
Sequence[ @@ -3681,7 +3703,7 @@ class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -3707,7 +3729,7 @@ class DictComp(BaseComp, BaseDict, BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -3881,7 +3903,7 @@ class Ellipsis(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -3905,7 +3927,7 @@ class Ellipsis(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -3931,7 +3953,7 @@ class Ellipsis(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -3961,7 +3983,7 @@ class Ellipsis(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -3985,7 +4007,7 @@ class Ellipsis(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -4011,7 +4033,7 @@ class Ellipsis(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -4057,7 +4079,7 @@ class Else(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -4081,7 +4103,7 @@ class Else(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + 
MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -4107,7 +4129,7 @@ class Else(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -4125,16 +4147,8 @@ class Else(BaseMatcherNode): ] = DoNotCare() -boolMatchType = Union[bool, MetadataMatchType, MatchIfTrue[Callable[[bool], bool]]] -CommentOrNoneMatchType = Union[ - "Comment", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Comment, None]], bool]], -] -NewlineMatchType = Union[ - "Newline", MetadataMatchType, MatchIfTrue[Callable[[cst.Newline], bool]] -] +boolMatchType = Union[bool, MetadataMatchType, MatchIfTrue[bool]] +NewlineMatchType = Union["Newline", MetadataMatchType, MatchIfTrue[cst.Newline]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -4149,10 +4163,24 @@ class EmptyLine(BaseMatcherNode): AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() comment: Union[ - CommentOrNoneMatchType, + Optional["Comment"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comment]], DoNotCareSentinel, - OneOf[CommentOrNoneMatchType], - AllOf[CommentOrNoneMatchType], + OneOf[ + Union[ + Optional["Comment"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comment]], + ] + ], + AllOf[ + Union[ + Optional["Comment"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comment]], + ] + ], ] = DoNotCare() newline: Union[ NewlineMatchType, @@ -4190,14 +4218,6 @@ class Equal(BaseCompOp, BaseMatcherNode): ] = DoNotCare() -AsNameOrNoneMatchType = Union[ - "AsName", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.AsName, None]], bool]], -] - - @dataclass(frozen=True, eq=False, unsafe_hash=False) class ExceptHandler(BaseMatcherNode): body: Union[ @@ -4207,16 +4227,40 @@ class ExceptHandler(BaseMatcherNode): AllOf[BaseSuiteMatchType], ] = DoNotCare() type: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, 
- OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() name: Union[ - AsNameOrNoneMatchType, + Optional["AsName"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, - OneOf[AsNameOrNoneMatchType], - AllOf[AsNameOrNoneMatchType], + OneOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], + AllOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -4244,7 +4288,7 @@ class ExceptHandler(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -4268,7 +4312,7 @@ class ExceptHandler(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -4294,7 +4338,7 @@ class ExceptHandler(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -4318,6 +4362,142 @@ class ExceptHandler(BaseMatcherNode): ] = DoNotCare() +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class ExceptStarHandler(BaseMatcherNode): + body: Union[ + BaseSuiteMatchType, + DoNotCareSentinel, + OneOf[BaseSuiteMatchType], + AllOf[BaseSuiteMatchType], + ] = DoNotCare() + type: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], + ] = DoNotCare() + name: Union[ + Optional["AsName"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AsName]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["AsName"], MetadataMatchType, 
MatchIfTrue[Optional[cst.AsName]] + ] + ], + AllOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], + ] = DoNotCare() + leading_lines: Union[ + Sequence[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.EmptyLine]], + OneOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + ] = DoNotCare() + whitespace_after_except: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_star: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_before_colon: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + 
MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + @dataclass(frozen=True, eq=False, unsafe_hash=False) class Expr(BaseSmallStatement, BaseMatcherNode): value: Union[ @@ -4374,7 +4554,7 @@ class Finally(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -4398,7 +4578,7 @@ class Finally(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -4424,7 +4604,7 @@ class Finally(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -4473,7 +4653,7 @@ class Float(BaseExpression, BaseNumber, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -4497,7 +4677,7 @@ class Float(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -4523,7 +4703,7 @@ class Float(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -4553,7 +4733,7 @@ class Float(BaseExpression, BaseNumber, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -4577,7 +4757,7 @@ class Float(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -4603,7 +4783,7 @@ class Float(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + 
MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -4659,14 +4839,6 @@ class FloorDivideAssign(BaseAugOp, BaseMatcherNode): ] = DoNotCare() -ElseOrNoneMatchType = Union[ - "Else", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Else, None]], bool]], -] - - @dataclass(frozen=True, eq=False, unsafe_hash=False) class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): target: Union[ @@ -4688,16 +4860,36 @@ class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): AllOf[BaseSuiteMatchType], ] = DoNotCare() orelse: Union[ - ElseOrNoneMatchType, + Optional["Else"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, - OneOf[ElseOrNoneMatchType], - AllOf[ElseOrNoneMatchType], + OneOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], + AllOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], ] = DoNotCare() asynchronous: Union[ - AsynchronousOrNoneMatchType, + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, - OneOf[AsynchronousOrNoneMatchType], - AllOf[AsynchronousOrNoneMatchType], + OneOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], + AllOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -4725,7 +4917,7 @@ class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -4749,7 +4941,7 @@ class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -4775,7 +4967,7 @@ class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], 
] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -4814,7 +5006,7 @@ class For(BaseCompoundStatement, BaseStatement, BaseMatcherNode): BaseFormattedStringContentMatchType = Union[ "BaseFormattedStringContent", MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseFormattedStringContent], bool]], + MatchIfTrue[cst.BaseFormattedStringContent], ] @@ -4846,7 +5038,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseFormattedStringContent]], bool]], + MatchIfTrue[Sequence[cst.BaseFormattedStringContent]], OneOf[ Union[ Sequence[ @@ -4870,7 +5062,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseFormattedStringContent]], bool]], + MatchIfTrue[Sequence[cst.BaseFormattedStringContent]], ] ], AllOf[ @@ -4896,7 +5088,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseFormattedStringContent]], bool]], + MatchIfTrue[Sequence[cst.BaseFormattedStringContent]], ] ], ] = DoNotCare() @@ -4906,20 +5098,20 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): end: Union[ Literal['"', "'", '"""', "'''"], MetadataMatchType, - MatchIfTrue[Callable[[Literal['"', "'", '"""', "'''"]], bool]], + MatchIfTrue[Literal['"', "'", '"""', "'''"]], DoNotCareSentinel, OneOf[ Union[ Literal['"', "'", '"""', "'''"], MetadataMatchType, - MatchIfTrue[Callable[[Literal['"', "'", '"""', "'''"]], bool]], + MatchIfTrue[Literal['"', "'", '"""', "'''"]], ] ], AllOf[ Union[ Literal['"', "'", '"""', "'''"], MetadataMatchType, - MatchIfTrue[Callable[[Literal['"', "'", '"""', "'''"]], bool]], + MatchIfTrue[Literal['"', "'", '"""', "'''"]], ] ], ] = DoNotCare() @@ -4949,7 +5141,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - 
MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -4973,7 +5165,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -4999,7 +5191,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -5029,7 +5221,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -5053,7 +5245,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -5079,7 +5271,7 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -5091,17 +5283,6 @@ class FormattedString(BaseExpression, BaseString, BaseMatcherNode): ] = DoNotCare() -strOrNoneMatchType = Union[ - str, None, MetadataMatchType, MatchIfTrue[Callable[[Union[str, None]], bool]] -] -AssignEqualOrNoneMatchType = Union[ - "AssignEqual", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.AssignEqual, None]], bool]], -] - - @dataclass(frozen=True, eq=False, unsafe_hash=False) class FormattedStringExpression(BaseFormattedStringContent, BaseMatcherNode): expression: Union[ @@ -5111,336 +5292,30 @@ class FormattedStringExpression(BaseFormattedStringContent, BaseMatcherNode): AllOf[BaseExpressionMatchType], ] = DoNotCare() conversion: Union[ - strOrNoneMatchType, + Optional[str], + MetadataMatchType, + MatchIfTrue[Optional[str]], 
DoNotCareSentinel, - OneOf[strOrNoneMatchType], - AllOf[strOrNoneMatchType], + OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() format_spec: Union[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - DoNotCareSentinel, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - DoNotCareSentinel, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - DoNotCareSentinel, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseFormattedStringContent]], bool]], - OneOf[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[[Sequence[cst.BaseFormattedStringContent]], bool] - ], - ] - ], - AllOf[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], 
- ] - ], - MatchIfTrue[ - Callable[[Sequence[cst.BaseFormattedStringContent]], bool] - ], - ] - ], - ], - None, + Optional[Sequence["BaseFormattedStringContent"]], MetadataMatchType, - MatchIfTrue[ - Callable[ - [ - Union[ - Sequence[cst.BaseFormattedStringContent], - None, - OneOf[Union[Sequence[cst.BaseFormattedStringContent], None]], - AllOf[Union[Sequence[cst.BaseFormattedStringContent], None]], - ] - ], - bool, - ] - ], + MatchIfTrue[Optional[Sequence[cst.BaseFormattedStringContent]]], DoNotCareSentinel, OneOf[ Union[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[[Sequence[cst.BaseFormattedStringContent]], bool] - ], - OneOf[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[ - [Sequence[cst.BaseFormattedStringContent]], bool - ] - ], - ] - ], - AllOf[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - 
AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[ - [Sequence[cst.BaseFormattedStringContent]], bool - ] - ], - ] - ], - ], - None, + Optional[Sequence["BaseFormattedStringContent"]], MetadataMatchType, - MatchIfTrue[ - Callable[ - [ - Union[ - Sequence[cst.BaseFormattedStringContent], - None, - OneOf[ - Union[ - Sequence[cst.BaseFormattedStringContent], None - ] - ], - AllOf[ - Union[ - Sequence[cst.BaseFormattedStringContent], None - ] - ], - ] - ], - bool, - ] - ], + MatchIfTrue[Optional[Sequence[cst.BaseFormattedStringContent]]], ] ], AllOf[ Union[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[[Sequence[cst.BaseFormattedStringContent]], bool] - ], - OneOf[ - Union[ - Sequence[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[ - [Sequence[cst.BaseFormattedStringContent]], bool - ] - ], - ] - ], - AllOf[ - Union[ - Sequence[ - Union[ - 
BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - AtLeastN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - AtMostN[ - Union[ - BaseFormattedStringContentMatchType, - OneOf[BaseFormattedStringContentMatchType], - AllOf[BaseFormattedStringContentMatchType], - ] - ], - ] - ], - MatchIfTrue[ - Callable[ - [Sequence[cst.BaseFormattedStringContent]], bool - ] - ], - ] - ], - ], - None, + Optional[Sequence["BaseFormattedStringContent"]], MetadataMatchType, - MatchIfTrue[ - Callable[ - [ - Union[ - Sequence[cst.BaseFormattedStringContent], - None, - OneOf[ - Union[ - Sequence[cst.BaseFormattedStringContent], None - ] - ], - AllOf[ - Union[ - Sequence[cst.BaseFormattedStringContent], None - ] - ], - ] - ], - bool, - ] - ], + MatchIfTrue[Optional[Sequence[cst.BaseFormattedStringContent]]], ] ], ] = DoNotCare() @@ -5457,10 +5332,24 @@ class FormattedStringExpression(BaseFormattedStringContent, BaseMatcherNode): AllOf[BaseParenthesizableWhitespaceMatchType], ] = DoNotCare() equal: Union[ - AssignEqualOrNoneMatchType, + Optional["AssignEqual"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AssignEqual]], DoNotCareSentinel, - OneOf[AssignEqualOrNoneMatchType], - AllOf[AssignEqualOrNoneMatchType], + OneOf[ + Union[ + Optional["AssignEqual"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AssignEqual]], + ] + ], + AllOf[ + Union[ + Optional["AssignEqual"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AssignEqual]], + ] + ], ] = DoNotCare() metadata: Union[ MetadataMatchType, @@ -5512,13 +5401,7 @@ class From(BaseMatcherNode): ParametersMatchType = Union[ - "Parameters", MetadataMatchType, MatchIfTrue[Callable[[cst.Parameters], bool]] -] -AnnotationOrNoneMatchType = Union[ - "Annotation", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Annotation, None]], bool]], + "Parameters", 
MetadataMatchType, MatchIfTrue[cst.Parameters] ] @@ -5565,7 +5448,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Decorator]], bool]], + MatchIfTrue[Sequence[cst.Decorator]], OneOf[ Union[ Sequence[ @@ -5589,7 +5472,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Decorator]], bool]], + MatchIfTrue[Sequence[cst.Decorator]], ] ], AllOf[ @@ -5615,21 +5498,49 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Decorator]], bool]], + MatchIfTrue[Sequence[cst.Decorator]], ] ], ] = DoNotCare() returns: Union[ - AnnotationOrNoneMatchType, + Optional["Annotation"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Annotation]], DoNotCareSentinel, - OneOf[AnnotationOrNoneMatchType], - AllOf[AnnotationOrNoneMatchType], + OneOf[ + Union[ + Optional["Annotation"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Annotation]], + ] + ], + AllOf[ + Union[ + Optional["Annotation"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Annotation]], + ] + ], ] = DoNotCare() asynchronous: Union[ - AsynchronousOrNoneMatchType, + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, - OneOf[AsynchronousOrNoneMatchType], - AllOf[AsynchronousOrNoneMatchType], + OneOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], + AllOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -5657,7 +5568,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -5681,7 +5592,7 @@ class 
FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -5707,7 +5618,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -5737,7 +5648,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -5761,7 +5672,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -5787,7 +5698,7 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -5815,6 +5726,32 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, BaseMatcherNode): OneOf[SimpleWhitespaceMatchType], AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() + type_parameters: Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + ] + ], + AllOf[ + Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + ] + ], + ] = DoNotCare() + whitespace_after_type_parameters: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, @@ -5826,10 +5763,10 @@ class FunctionDef(BaseCompoundStatement, BaseStatement, 
BaseMatcherNode): @dataclass(frozen=True, eq=False, unsafe_hash=False) class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): elt: Union[ - BaseAssignTargetExpressionMatchType, + BaseExpressionMatchType, DoNotCareSentinel, - OneOf[BaseAssignTargetExpressionMatchType], - AllOf[BaseAssignTargetExpressionMatchType], + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, @@ -5863,7 +5800,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -5887,7 +5824,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -5913,7 +5850,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -5943,7 +5880,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -5967,7 +5904,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -5993,7 +5930,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -6005,9 +5942,7 @@ class GeneratorExp(BaseComp, BaseExpression, BaseSimpleComp, BaseMatcherNode): ] = DoNotCare() -NameItemMatchType = Union[ - 
"NameItem", MetadataMatchType, MatchIfTrue[Callable[[cst.NameItem], bool]] -] +NameItemMatchType = Union["NameItem", MetadataMatchType, MatchIfTrue[cst.NameItem]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -6038,7 +5973,7 @@ class Global(BaseSmallStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.NameItem]], bool]], + MatchIfTrue[Sequence[cst.NameItem]], OneOf[ Union[ Sequence[ @@ -6062,7 +5997,7 @@ class Global(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.NameItem]], bool]], + MatchIfTrue[Sequence[cst.NameItem]], ] ], AllOf[ @@ -6088,7 +6023,7 @@ class Global(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.NameItem]], bool]], + MatchIfTrue[Sequence[cst.NameItem]], ] ], ] = DoNotCare() @@ -6157,11 +6092,7 @@ class GreaterThanEqual(BaseCompOp, BaseMatcherNode): IfOrElseOrNoneMatchType = Union[ - "If", - "Else", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.If, cst.Else, None]], bool]], + "If", "Else", None, MetadataMatchType, MatchIfTrue[Union[cst.If, cst.Else, None]] ] @@ -6211,7 +6142,7 @@ class If(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -6235,7 +6166,7 @@ class If(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -6261,7 +6192,7 @@ class If(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -6331,7 +6262,7 @@ class IfExp(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ 
-6355,7 +6286,7 @@ class IfExp(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -6381,7 +6312,7 @@ class IfExp(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -6411,7 +6342,7 @@ class IfExp(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -6435,7 +6366,7 @@ class IfExp(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -6461,7 +6392,7 @@ class IfExp(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -6528,7 +6459,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -6552,7 +6483,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -6578,7 +6509,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -6608,7 +6539,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -6632,7 +6563,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], 
bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -6658,7 +6589,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -6671,7 +6602,7 @@ class Imaginary(BaseExpression, BaseNumber, BaseMatcherNode): ImportAliasMatchType = Union[ - "ImportAlias", MetadataMatchType, MatchIfTrue[Callable[[cst.ImportAlias], bool]] + "ImportAlias", MetadataMatchType, MatchIfTrue[cst.ImportAlias] ] @@ -6703,7 +6634,7 @@ class Import(BaseSmallStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ @@ -6727,7 +6658,7 @@ class Import(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ @@ -6753,7 +6684,7 @@ class Import(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ] = DoNotCare() @@ -6778,10 +6709,7 @@ class Import(BaseSmallStatement, BaseMatcherNode): AttributeOrNameMatchType = Union[ - "Attribute", - "Name", - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Attribute, cst.Name]], bool]], + "Attribute", "Name", MetadataMatchType, MatchIfTrue[Union[cst.Attribute, cst.Name]] ] @@ -6794,10 +6722,20 @@ class ImportAlias(BaseMatcherNode): AllOf[AttributeOrNameMatchType], ] = DoNotCare() asname: Union[ - AsNameOrNoneMatchType, + Optional["AsName"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, - OneOf[AsNameOrNoneMatchType], - AllOf[AsNameOrNoneMatchType], + OneOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], + AllOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], ] = DoNotCare() 
comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] @@ -6815,19 +6753,7 @@ AttributeOrNameOrNoneMatchType = Union[ "Name", None, MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Attribute, cst.Name, None]], bool]], -] -LeftParenOrNoneMatchType = Union[ - "LeftParen", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.LeftParen, None]], bool]], -] -RightParenOrNoneMatchType = Union[ - "RightParen", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.RightParen, None]], bool]], + MatchIfTrue[Union[cst.Attribute, cst.Name, None]], ] @@ -6866,7 +6792,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ @@ -6890,7 +6816,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ @@ -6916,23 +6842,18 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ], "ImportStar", MetadataMatchType, MatchIfTrue[ - Callable[ - [ - Union[ - Sequence[cst.ImportAlias], - cst.ImportStar, - OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], - AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], - ] - ], - bool, + Union[ + Sequence[cst.ImportAlias], + cst.ImportStar, + OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], + AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], ] ], DoNotCareSentinel, @@ -6960,7 +6881,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ @@ -6984,7 +6905,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - 
MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ @@ -7010,23 +6931,18 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ], "ImportStar", MetadataMatchType, MatchIfTrue[ - Callable[ - [ - Union[ - Sequence[cst.ImportAlias], - cst.ImportStar, - OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], - AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], - ] - ], - bool, + Union[ + Sequence[cst.ImportAlias], + cst.ImportStar, + OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], + AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], ] ], ] @@ -7055,7 +6971,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], OneOf[ Union[ Sequence[ @@ -7079,7 +6995,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], AllOf[ @@ -7105,23 +7021,18 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ImportAlias]], bool]], + MatchIfTrue[Sequence[cst.ImportAlias]], ] ], ], "ImportStar", MetadataMatchType, MatchIfTrue[ - Callable[ - [ - Union[ - Sequence[cst.ImportAlias], - cst.ImportStar, - OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], - AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], - ] - ], - bool, + Union[ + Sequence[cst.ImportAlias], + cst.ImportStar, + OneOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], + AllOf[Union[Sequence[cst.ImportAlias], cst.ImportStar]], ] ], ] @@ -7153,7 +7064,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Dot]], bool]], + MatchIfTrue[Sequence[cst.Dot]], OneOf[ Union[ Sequence[ @@ 
-7173,7 +7084,7 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Dot]], bool]], + MatchIfTrue[Sequence[cst.Dot]], ] ], AllOf[ @@ -7195,21 +7106,49 @@ class ImportFrom(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Dot]], bool]], + MatchIfTrue[Sequence[cst.Dot]], ] ], ] = DoNotCare() lpar: Union[ - LeftParenOrNoneMatchType, + Optional["LeftParen"], + MetadataMatchType, + MatchIfTrue[Optional[cst.LeftParen]], DoNotCareSentinel, - OneOf[LeftParenOrNoneMatchType], - AllOf[LeftParenOrNoneMatchType], + OneOf[ + Union[ + Optional["LeftParen"], + MetadataMatchType, + MatchIfTrue[Optional[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Optional["LeftParen"], + MetadataMatchType, + MatchIfTrue[Optional[cst.LeftParen]], + ] + ], ] = DoNotCare() rpar: Union[ - RightParenOrNoneMatchType, + Optional["RightParen"], + MetadataMatchType, + MatchIfTrue[Optional[cst.RightParen]], DoNotCareSentinel, - OneOf[RightParenOrNoneMatchType], - AllOf[RightParenOrNoneMatchType], + OneOf[ + Union[ + Optional["RightParen"], + MetadataMatchType, + MatchIfTrue[Optional[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Optional["RightParen"], + MetadataMatchType, + MatchIfTrue[Optional[cst.RightParen]], + ] + ], ] = DoNotCare() semicolon: Union[ SemicolonMatchType, @@ -7276,7 +7215,7 @@ class In(BaseCompOp, BaseMatcherNode): BaseStatementMatchType = Union[ - "BaseStatement", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseStatement], bool]] + "BaseStatement", MetadataMatchType, MatchIfTrue[cst.BaseStatement] ] @@ -7308,7 +7247,7 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseStatement]], OneOf[ Union[ Sequence[ @@ -7332,7 +7271,7 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseStatement]], ] 
], AllOf[ @@ -7358,7 +7297,7 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseStatement]], ] ], ] = DoNotCare() @@ -7369,10 +7308,12 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): AllOf[TrailingWhitespaceMatchType], ] = DoNotCare() indent: Union[ - strOrNoneMatchType, + Optional[str], + MetadataMatchType, + MatchIfTrue[Optional[str]], DoNotCareSentinel, - OneOf[strOrNoneMatchType], - AllOf[strOrNoneMatchType], + OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() footer: Union[ Sequence[ @@ -7400,7 +7341,7 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -7424,7 +7365,7 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -7450,7 +7391,7 @@ class IndentedBlock(BaseSuite, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -7470,6 +7411,46 @@ class Index(BaseSlice, BaseMatcherNode): OneOf[BaseExpressionMatchType], AllOf[BaseExpressionMatchType], ] = DoNotCare() + star: Union[ + Optional[Literal["*"]], + MetadataMatchType, + MatchIfTrue[Optional[Literal["*"]]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional[Literal["*"]], + MetadataMatchType, + MatchIfTrue[Optional[Literal["*"]]], + ] + ], + AllOf[ + Union[ + Optional[Literal["*"]], + MetadataMatchType, + MatchIfTrue[Optional[Literal["*"]]], + ] + ], + ] = DoNotCare() + whitespace_after_star: Union[ + Optional["BaseParenthesizableWhitespace"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseParenthesizableWhitespace]], + 
DoNotCareSentinel, + OneOf[ + Union[ + Optional["BaseParenthesizableWhitespace"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseParenthesizableWhitespace]], + ] + ], + AllOf[ + Union[ + Optional["BaseParenthesizableWhitespace"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseParenthesizableWhitespace]], + ] + ], + ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, @@ -7509,7 +7490,7 @@ class Integer(BaseExpression, BaseNumber, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -7533,7 +7514,7 @@ class Integer(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -7559,7 +7540,7 @@ class Integer(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -7589,7 +7570,7 @@ class Integer(BaseExpression, BaseNumber, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -7613,7 +7594,7 @@ class Integer(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -7639,7 +7620,7 @@ class Integer(BaseExpression, BaseNumber, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -7701,9 +7682,7 @@ class IsNot(BaseCompOp, BaseMatcherNode): ] = DoNotCare() -ColonMatchType = Union[ - "Colon", MetadataMatchType, MatchIfTrue[Callable[[cst.Colon], bool]] -] +ColonMatchType = Union["Colon", MetadataMatchType, MatchIfTrue[cst.Colon]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -7749,7 
+7728,7 @@ class Lambda(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -7773,7 +7752,7 @@ class Lambda(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -7799,7 +7778,7 @@ class Lambda(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -7829,7 +7808,7 @@ class Lambda(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -7853,7 +7832,7 @@ class Lambda(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -7879,7 +7858,7 @@ class Lambda(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -8034,17 +8013,13 @@ class LessThanEqual(BaseCompOp, BaseMatcherNode): BaseElementMatchType = Union[ - "BaseElement", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseElement], bool]] + "BaseElement", MetadataMatchType, MatchIfTrue[cst.BaseElement] ] LeftSquareBracketMatchType = Union[ - "LeftSquareBracket", - MetadataMatchType, - MatchIfTrue[Callable[[cst.LeftSquareBracket], bool]], + "LeftSquareBracket", MetadataMatchType, MatchIfTrue[cst.LeftSquareBracket] ] RightSquareBracketMatchType = Union[ - "RightSquareBracket", - MetadataMatchType, - MatchIfTrue[Callable[[cst.RightSquareBracket], bool]], + "RightSquareBracket", MetadataMatchType, MatchIfTrue[cst.RightSquareBracket] ] @@ -8082,7 +8057,7 @@ class List( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + 
MatchIfTrue[Sequence[cst.BaseElement]], OneOf[ Union[ Sequence[ @@ -8106,7 +8081,7 @@ class List( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], ] ], AllOf[ @@ -8132,7 +8107,7 @@ class List( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], ] ], ] = DoNotCare() @@ -8174,7 +8149,7 @@ class List( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -8198,7 +8173,7 @@ class List( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -8224,7 +8199,7 @@ class List( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -8254,7 +8229,7 @@ class List( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -8278,7 +8253,7 @@ class List( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -8304,7 +8279,7 @@ class List( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -8319,10 +8294,10 @@ class List( @dataclass(frozen=True, eq=False, unsafe_hash=False) class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNode): elt: Union[ - BaseAssignTargetExpressionMatchType, + BaseExpressionMatchType, DoNotCareSentinel, - OneOf[BaseAssignTargetExpressionMatchType], - AllOf[BaseAssignTargetExpressionMatchType], + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, @@ -8368,7 +8343,7 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ] ], DoNotCareSentinel, - 
MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -8392,7 +8367,7 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -8418,7 +8393,7 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -8448,7 +8423,7 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -8472,7 +8447,7 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -8498,7 +8473,7 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -8510,6 +8485,2368 @@ class ListComp(BaseComp, BaseExpression, BaseList, BaseSimpleComp, BaseMatcherNo ] = DoNotCare() +MatchCaseMatchType = Union["MatchCase", MetadataMatchType, MatchIfTrue[cst.MatchCase]] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class Match(BaseCompoundStatement, BaseStatement, BaseMatcherNode): + subject: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], + ] = DoNotCare() + cases: Union[ + Sequence[ + Union[ + MatchCaseMatchType, + DoNotCareSentinel, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + AtLeastN[ + Union[ + MatchCaseMatchType, + DoNotCareSentinel, + OneOf[MatchCaseMatchType], + 
AllOf[MatchCaseMatchType], + ] + ], + AtMostN[ + Union[ + MatchCaseMatchType, + DoNotCareSentinel, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.MatchCase]], + OneOf[ + Union[ + Sequence[ + Union[ + MatchCaseMatchType, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + AtLeastN[ + Union[ + MatchCaseMatchType, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + ] + ], + AtMostN[ + Union[ + MatchCaseMatchType, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchCase]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchCaseMatchType, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + AtLeastN[ + Union[ + MatchCaseMatchType, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + ] + ], + AtMostN[ + Union[ + MatchCaseMatchType, + OneOf[MatchCaseMatchType], + AllOf[MatchCaseMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchCase]], + ] + ], + ] = DoNotCare() + leading_lines: Union[ + Sequence[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.EmptyLine]], + OneOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + 
OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + ] = DoNotCare() + whitespace_after_match: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_before_colon: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_colon: Union[ + TrailingWhitespaceMatchType, + DoNotCareSentinel, + OneOf[TrailingWhitespaceMatchType], + AllOf[TrailingWhitespaceMatchType], + ] = DoNotCare() + indent: Union[ + Optional[str], + MetadataMatchType, + MatchIfTrue[Optional[str]], + DoNotCareSentinel, + OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + ] = DoNotCare() + footer: Union[ + Sequence[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.EmptyLine]], + OneOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + 
MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchAs(BaseMatcherNode): + pattern: Union[ + Optional["MatchPattern"], + MetadataMatchType, + MatchIfTrue[Optional[cst.MatchPattern]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["MatchPattern"], + MetadataMatchType, + MatchIfTrue[Optional[cst.MatchPattern]], + ] + ], + AllOf[ + Union[ + Optional["MatchPattern"], + MetadataMatchType, + MatchIfTrue[Optional[cst.MatchPattern]], + ] + ], + ] = DoNotCare() + name: Union[ + Optional["Name"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Name]], + DoNotCareSentinel, + OneOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + AllOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + ] = DoNotCare() + whitespace_before_as: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_as: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + 
DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + 
Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +MatchPatternMatchType = Union[ + "MatchPattern", MetadataMatchType, MatchIfTrue[cst.MatchPattern] +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchCase(BaseMatcherNode): + pattern: Union[ + MatchPatternMatchType, + DoNotCareSentinel, + OneOf[MatchPatternMatchType], + AllOf[MatchPatternMatchType], + ] = DoNotCare() + body: Union[ + BaseSuiteMatchType, + DoNotCareSentinel, + OneOf[BaseSuiteMatchType], + AllOf[BaseSuiteMatchType], + ] = DoNotCare() + guard: Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + ] = DoNotCare() + leading_lines: Union[ + Sequence[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.EmptyLine]], + OneOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + 
AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + ] = DoNotCare() + whitespace_after_case: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_before_if: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_if: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_before_colon: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +MatchSequenceElementMatchType = Union[ + "MatchSequenceElement", MetadataMatchType, MatchIfTrue[cst.MatchSequenceElement] +] +MatchKeywordElementMatchType = Union[ + "MatchKeywordElement", MetadataMatchType, MatchIfTrue[cst.MatchKeywordElement] +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchClass(BaseMatcherNode): + cls: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + 
AllOf[BaseExpressionMatchType], + ] = DoNotCare() + patterns: Union[ + Sequence[ + Union[ + MatchSequenceElementMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + AtLeastN[ + Union[ + MatchSequenceElementMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.MatchSequenceElement]], + OneOf[ + Union[ + Sequence[ + Union[ + MatchSequenceElementMatchType, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + AtLeastN[ + Union[ + MatchSequenceElementMatchType, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementMatchType, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchSequenceElement]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchSequenceElementMatchType, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + AtLeastN[ + Union[ + MatchSequenceElementMatchType, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementMatchType, + OneOf[MatchSequenceElementMatchType], + AllOf[MatchSequenceElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchSequenceElement]], + ] + ], + ] = DoNotCare() + kwds: Union[ + Sequence[ + Union[ + MatchKeywordElementMatchType, + DoNotCareSentinel, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + AtLeastN[ + Union[ + MatchKeywordElementMatchType, + DoNotCareSentinel, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + ] + ], + 
AtMostN[ + Union[ + MatchKeywordElementMatchType, + DoNotCareSentinel, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.MatchKeywordElement]], + OneOf[ + Union[ + Sequence[ + Union[ + MatchKeywordElementMatchType, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + AtLeastN[ + Union[ + MatchKeywordElementMatchType, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchKeywordElementMatchType, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchKeywordElement]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchKeywordElementMatchType, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + AtLeastN[ + Union[ + MatchKeywordElementMatchType, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchKeywordElementMatchType, + OneOf[MatchKeywordElementMatchType], + AllOf[MatchKeywordElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchKeywordElement]], + ] + ], + ] = DoNotCare() + whitespace_after_cls: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + whitespace_before_patterns: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_kwds: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + 
AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + 
], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchKeywordElement(BaseMatcherNode): + key: Union[ + NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] + ] = DoNotCare() + pattern: Union[ + MatchPatternMatchType, + DoNotCareSentinel, + OneOf[MatchPatternMatchType], + AllOf[MatchPatternMatchType], + ] = DoNotCare() + comma: Union[ + CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] + ] = DoNotCare() + whitespace_before_equal: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_equal: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +MatchSequenceElementOrMatchStarMatchType = Union[ + "MatchSequenceElement", + "MatchStar", + MetadataMatchType, + MatchIfTrue[Union[cst.MatchSequenceElement, cst.MatchStar]], +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchList(BaseMatcherNode): + patterns: Union[ + Sequence[ + Union[ + 
MatchSequenceElementOrMatchStarMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + AtLeastN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[ + Sequence[ + Union[ + cst.MatchSequenceElement, + cst.MatchStar, + OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + ] + ] + ], + OneOf[ + Union[ + Sequence[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + AtLeastN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + ] + ], + MatchIfTrue[ + Sequence[ + Union[ + cst.MatchSequenceElement, + cst.MatchStar, + OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + ] + ] + ], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + AtLeastN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + 
OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + ] + ], + MatchIfTrue[ + Sequence[ + Union[ + cst.MatchSequenceElement, + cst.MatchStar, + OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + ] + ] + ], + ] + ], + ] = DoNotCare() + lbracket: Union[ + Optional["LeftSquareBracket"], + MetadataMatchType, + MatchIfTrue[Optional[cst.LeftSquareBracket]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["LeftSquareBracket"], + MetadataMatchType, + MatchIfTrue[Optional[cst.LeftSquareBracket]], + ] + ], + AllOf[ + Union[ + Optional["LeftSquareBracket"], + MetadataMatchType, + MatchIfTrue[Optional[cst.LeftSquareBracket]], + ] + ], + ] = DoNotCare() + rbracket: Union[ + Optional["RightSquareBracket"], + MetadataMatchType, + MatchIfTrue[Optional[cst.RightSquareBracket]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["RightSquareBracket"], + MetadataMatchType, + MatchIfTrue[Optional[cst.RightSquareBracket]], + ] + ], + AllOf[ + Union[ + Optional["RightSquareBracket"], + MetadataMatchType, + MatchIfTrue[Optional[cst.RightSquareBracket]], + ] + ], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + 
], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +MatchMappingElementMatchType = Union[ + "MatchMappingElement", 
MetadataMatchType, MatchIfTrue[cst.MatchMappingElement] +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchMapping(BaseMatcherNode): + elements: Union[ + Sequence[ + Union[ + MatchMappingElementMatchType, + DoNotCareSentinel, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + AtLeastN[ + Union[ + MatchMappingElementMatchType, + DoNotCareSentinel, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchMappingElementMatchType, + DoNotCareSentinel, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.MatchMappingElement]], + OneOf[ + Union[ + Sequence[ + Union[ + MatchMappingElementMatchType, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + AtLeastN[ + Union[ + MatchMappingElementMatchType, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchMappingElementMatchType, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchMappingElement]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchMappingElementMatchType, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + AtLeastN[ + Union[ + MatchMappingElementMatchType, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchMappingElementMatchType, + OneOf[MatchMappingElementMatchType], + AllOf[MatchMappingElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchMappingElement]], + ] + ], + ] = DoNotCare() + lbrace: Union[ + LeftCurlyBraceMatchType, + DoNotCareSentinel, + OneOf[LeftCurlyBraceMatchType], + AllOf[LeftCurlyBraceMatchType], + ] = DoNotCare() + rbrace: Union[ + RightCurlyBraceMatchType, + DoNotCareSentinel, + OneOf[RightCurlyBraceMatchType], + 
AllOf[RightCurlyBraceMatchType], + ] = DoNotCare() + rest: Union[ + Optional["Name"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Name]], + DoNotCareSentinel, + OneOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + AllOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + ] = DoNotCare() + whitespace_before_rest: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + trailing_comma: Union[ + Optional["Comma"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comma]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["Comma"], MetadataMatchType, MatchIfTrue[Optional[cst.Comma]] + ] + ], + AllOf[ + Union[ + Optional["Comma"], MetadataMatchType, MatchIfTrue[Optional[cst.Comma]] + ] + ], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + 
LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchMappingElement(BaseMatcherNode): + key: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], + ] = DoNotCare() + pattern: Union[ + MatchPatternMatchType, + DoNotCareSentinel, + OneOf[MatchPatternMatchType], + AllOf[MatchPatternMatchType], + ] = 
DoNotCare() + comma: Union[ + CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] + ] = DoNotCare() + whitespace_before_colon: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_colon: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +MatchOrElementMatchType = Union[ + "MatchOrElement", MetadataMatchType, MatchIfTrue[cst.MatchOrElement] +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchOr(BaseMatcherNode): + patterns: Union[ + Sequence[ + Union[ + MatchOrElementMatchType, + DoNotCareSentinel, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + AtLeastN[ + Union[ + MatchOrElementMatchType, + DoNotCareSentinel, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchOrElementMatchType, + DoNotCareSentinel, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.MatchOrElement]], + OneOf[ + Union[ + Sequence[ + Union[ + MatchOrElementMatchType, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + AtLeastN[ + Union[ + MatchOrElementMatchType, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchOrElementMatchType, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchOrElement]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchOrElementMatchType, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + 
AtLeastN[ + Union[ + MatchOrElementMatchType, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + ] + ], + AtMostN[ + Union[ + MatchOrElementMatchType, + OneOf[MatchOrElementMatchType], + AllOf[MatchOrElementMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.MatchOrElement]], + ] + ], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + 
AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +BitOrMatchType = Union["BitOr", MetadataMatchType, MatchIfTrue[cst.BitOr]] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchOrElement(BaseMatcherNode): + pattern: Union[ + MatchPatternMatchType, + DoNotCareSentinel, + OneOf[MatchPatternMatchType], + AllOf[MatchPatternMatchType], + ] = DoNotCare() + separator: Union[ + BitOrMatchType, DoNotCareSentinel, OneOf[BitOrMatchType], AllOf[BitOrMatchType] + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchPattern(BaseMatcherNode): + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchSequence(BaseMatcherNode): + metadata: Union[ 
+ MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchSequenceElement(BaseMatcherNode): + value: Union[ + MatchPatternMatchType, + DoNotCareSentinel, + OneOf[MatchPatternMatchType], + AllOf[MatchPatternMatchType], + ] = DoNotCare() + comma: Union[ + CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchSingleton(BaseMatcherNode): + value: Union[ + NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchStar(BaseMatcherNode): + name: Union[ + Optional["Name"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Name]], + DoNotCareSentinel, + OneOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + AllOf[ + Union[Optional["Name"], MetadataMatchType, MatchIfTrue[Optional[cst.Name]]] + ], + ] = DoNotCare() + comma: Union[ + CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] + ] = DoNotCare() + whitespace_before_name: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchTuple(BaseMatcherNode): + patterns: Union[ + Sequence[ + Union[ + 
MatchSequenceElementOrMatchStarMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + AtLeastN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + DoNotCareSentinel, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[ + Sequence[ + Union[ + cst.MatchSequenceElement, + cst.MatchStar, + OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + ] + ] + ], + OneOf[ + Union[ + Sequence[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + AtLeastN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + ] + ], + MatchIfTrue[ + Sequence[ + Union[ + cst.MatchSequenceElement, + cst.MatchStar, + OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + ] + ] + ], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + AtLeastN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + AtMostN[ + Union[ + MatchSequenceElementOrMatchStarMatchType, + 
OneOf[MatchSequenceElementOrMatchStarMatchType], + AllOf[MatchSequenceElementOrMatchStarMatchType], + ] + ], + ] + ], + MatchIfTrue[ + Sequence[ + Union[ + cst.MatchSequenceElement, + cst.MatchStar, + OneOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + AllOf[Union[cst.MatchSequenceElement, cst.MatchStar]], + ] + ] + ], + ] + ], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], 
+ AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class MatchValue(BaseMatcherNode): + value: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + @dataclass(frozen=True, eq=False, unsafe_hash=False) class MatrixMultiply(BaseBinaryOp, BaseMatcherNode): whitespace_before: Union[ @@ -8574,9 +10911,7 @@ SimpleStatementLineOrBaseCompoundStatementMatchType = Union[ "SimpleStatementLine", "BaseCompoundStatement", MetadataMatchType, - MatchIfTrue[ - Callable[[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], bool] - ], + MatchIfTrue[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], ] @@ -8609,26 +10944,13 @@ class Module(BaseMatcherNode): ], DoNotCareSentinel, 
MatchIfTrue[ - Callable[ - [ - Sequence[ - Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - OneOf[ - Union[ - cst.SimpleStatementLine, cst.BaseCompoundStatement - ] - ], - AllOf[ - Union[ - cst.SimpleStatementLine, cst.BaseCompoundStatement - ] - ], - ] - ] - ], - bool, + Sequence[ + Union[ + cst.SimpleStatementLine, + cst.BaseCompoundStatement, + OneOf[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], + AllOf[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]], + ] ] ], OneOf[ @@ -8663,28 +10985,21 @@ class Module(BaseMatcherNode): ] ], MatchIfTrue[ - Callable[ - [ - Sequence[ + Sequence[ + Union[ + cst.SimpleStatementLine, + cst.BaseCompoundStatement, + OneOf[ Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - OneOf[ - Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - ] - ], - AllOf[ - Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - ] - ], + cst.SimpleStatementLine, cst.BaseCompoundStatement ] - ] - ], - bool, + ], + AllOf[ + Union[ + cst.SimpleStatementLine, cst.BaseCompoundStatement + ] + ], + ] ] ], ] @@ -8721,28 +11036,21 @@ class Module(BaseMatcherNode): ] ], MatchIfTrue[ - Callable[ - [ - Sequence[ + Sequence[ + Union[ + cst.SimpleStatementLine, + cst.BaseCompoundStatement, + OneOf[ Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - OneOf[ - Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - ] - ], - AllOf[ - Union[ - cst.SimpleStatementLine, - cst.BaseCompoundStatement, - ] - ], + cst.SimpleStatementLine, cst.BaseCompoundStatement ] - ] - ], - bool, + ], + AllOf[ + Union[ + cst.SimpleStatementLine, cst.BaseCompoundStatement + ] + ], + ] ] ], ] @@ -8774,7 +11082,7 @@ class Module(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -8798,7 +11106,7 @@ class Module(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], 
bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -8824,7 +11132,7 @@ class Module(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -8854,7 +11162,7 @@ class Module(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -8878,7 +11186,7 @@ class Module(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -8904,7 +11212,7 @@ class Module(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -9049,7 +11357,7 @@ class Name( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -9073,7 +11381,7 @@ class Name( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -9099,7 +11407,7 @@ class Name( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -9129,7 +11437,7 @@ class Name( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -9153,7 +11461,7 @@ class Name( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -9179,7 +11487,7 @@ class Name( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -9247,7 +11555,7 @@ class NamedExpr(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ 
Union[ Sequence[ @@ -9271,7 +11579,7 @@ class NamedExpr(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -9297,7 +11605,7 @@ class NamedExpr(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -9327,7 +11635,7 @@ class NamedExpr(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -9351,7 +11659,7 @@ class NamedExpr(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -9377,7 +11685,7 @@ class NamedExpr(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -9404,10 +11712,12 @@ class NamedExpr(BaseExpression, BaseMatcherNode): @dataclass(frozen=True, eq=False, unsafe_hash=False) class Newline(BaseMatcherNode): value: Union[ - strOrNoneMatchType, + Optional[str], + MetadataMatchType, + MatchIfTrue[Optional[str]], DoNotCareSentinel, - OneOf[strOrNoneMatchType], - AllOf[strOrNoneMatchType], + OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], ] = DoNotCare() metadata: Union[ MetadataMatchType, @@ -9445,7 +11755,7 @@ class Nonlocal(BaseSmallStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.NameItem]], bool]], + MatchIfTrue[Sequence[cst.NameItem]], OneOf[ Union[ Sequence[ @@ -9469,7 +11779,7 @@ class Nonlocal(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.NameItem]], bool]], + MatchIfTrue[Sequence[cst.NameItem]], ] ], AllOf[ @@ -9495,7 
+11805,7 @@ class Nonlocal(BaseSmallStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.NameItem]], bool]], + MatchIfTrue[Sequence[cst.NameItem]], ] ], ] = DoNotCare() @@ -9616,10 +11926,24 @@ class Param(BaseMatcherNode): NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] ] = DoNotCare() annotation: Union[ - AnnotationOrNoneMatchType, + Optional["Annotation"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Annotation]], DoNotCareSentinel, - OneOf[AnnotationOrNoneMatchType], - AllOf[AnnotationOrNoneMatchType], + OneOf[ + Union[ + Optional["Annotation"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Annotation]], + ] + ], + AllOf[ + Union[ + Optional["Annotation"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Annotation]], + ] + ], ] = DoNotCare() equal: Union[ AssignEqualMatchType, @@ -9628,10 +11952,24 @@ class Param(BaseMatcherNode): AllOf[AssignEqualMatchType], ] = DoNotCare() default: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] @@ -9664,6 +12002,31 @@ class ParamSlash(BaseMatcherNode): comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] ] = DoNotCare() + whitespace_after: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + 
OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class ParamSpec(BaseMatcherNode): + name: Union[ + NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] + ] = DoNotCare() + whitespace_after_star: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() metadata: Union[ MetadataMatchType, DoNotCareSentinel, @@ -9685,23 +12048,15 @@ class ParamStar(BaseMatcherNode): ] = DoNotCare() -ParamMatchType = Union[ - "Param", MetadataMatchType, MatchIfTrue[Callable[[cst.Param], bool]] -] +ParamMatchType = Union["Param", MetadataMatchType, MatchIfTrue[cst.Param]] ParamOrParamStarMatchType = Union[ "Param", "ParamStar", MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Param, cst.ParamStar]], bool]], -] -ParamOrNoneMatchType = Union[ - "Param", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Param, None]], bool]], + MatchIfTrue[Union[cst.Param, cst.ParamStar]], ] ParamSlashMatchType = Union[ - "ParamSlash", MetadataMatchType, MatchIfTrue[Callable[[cst.ParamSlash], bool]] + "ParamSlash", MetadataMatchType, MatchIfTrue[cst.ParamSlash] ] @@ -9733,7 +12088,7 @@ class Parameters(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], OneOf[ Union[ Sequence[ @@ -9757,7 +12112,7 @@ class Parameters(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], ] ], AllOf[ @@ -9783,7 +12138,7 @@ class Parameters(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], ] ], ] = DoNotCare() @@ -9819,7 +12174,7 @@ class Parameters(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], OneOf[ Union[ Sequence[ @@ -9843,7 
+12198,7 @@ class Parameters(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], ] ], AllOf[ @@ -9869,15 +12224,25 @@ class Parameters(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], ] ], ] = DoNotCare() star_kwarg: Union[ - ParamOrNoneMatchType, + Optional["Param"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Param]], DoNotCareSentinel, - OneOf[ParamOrNoneMatchType], - AllOf[ParamOrNoneMatchType], + OneOf[ + Union[ + Optional["Param"], MetadataMatchType, MatchIfTrue[Optional[cst.Param]] + ] + ], + AllOf[ + Union[ + Optional["Param"], MetadataMatchType, MatchIfTrue[Optional[cst.Param]] + ] + ], ] = DoNotCare() posonly_params: Union[ Sequence[ @@ -9905,7 +12270,7 @@ class Parameters(BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], OneOf[ Union[ Sequence[ @@ -9929,7 +12294,7 @@ class Parameters(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], ] ], AllOf[ @@ -9955,7 +12320,7 @@ class Parameters(BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.Param]], bool]], + MatchIfTrue[Sequence[cst.Param]], ] ], ] = DoNotCare() @@ -10007,7 +12372,7 @@ class ParenthesizedWhitespace(BaseParenthesizableWhitespace, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -10031,7 +12396,7 @@ class ParenthesizedWhitespace(BaseParenthesizableWhitespace, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -10057,7 +12422,7 @@ class ParenthesizedWhitespace(BaseParenthesizableWhitespace, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + 
MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -10154,27 +12519,39 @@ class PowerAssign(BaseAugOp, BaseMatcherNode): ] = DoNotCare() -FromOrNoneMatchType = Union[ - "From", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.From, None]], bool]], -] - - @dataclass(frozen=True, eq=False, unsafe_hash=False) class Raise(BaseSmallStatement, BaseMatcherNode): exc: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() cause: Union[ - FromOrNoneMatchType, + Optional["From"], + MetadataMatchType, + MatchIfTrue[Optional[cst.From]], DoNotCareSentinel, - OneOf[FromOrNoneMatchType], - AllOf[FromOrNoneMatchType], + OneOf[ + Union[Optional["From"], MetadataMatchType, MatchIfTrue[Optional[cst.From]]] + ], + AllOf[ + Union[Optional["From"], MetadataMatchType, MatchIfTrue[Optional[cst.From]]] + ], ] = DoNotCare() whitespace_after_raise: Union[ SimpleWhitespaceMatchType, @@ -10199,10 +12576,24 @@ class Raise(BaseSmallStatement, BaseMatcherNode): @dataclass(frozen=True, eq=False, unsafe_hash=False) class Return(BaseSmallStatement, BaseMatcherNode): value: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + 
], ] = DoNotCare() whitespace_after_return: Union[ SimpleWhitespaceMatchType, @@ -10366,7 +12757,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], OneOf[ Union[ Sequence[ @@ -10390,7 +12781,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], ] ], AllOf[ @@ -10416,7 +12807,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], ] ], ] = DoNotCare() @@ -10458,7 +12849,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -10482,7 +12873,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -10508,7 +12899,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -10538,7 +12929,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -10562,7 +12953,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -10588,7 +12979,7 @@ class Set(BaseExpression, BaseSet, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -10603,10 +12994,10 @@ class 
Set(BaseExpression, BaseSet, BaseMatcherNode): @dataclass(frozen=True, eq=False, unsafe_hash=False) class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode): elt: Union[ - BaseAssignTargetExpressionMatchType, + BaseExpressionMatchType, DoNotCareSentinel, - OneOf[BaseAssignTargetExpressionMatchType], - AllOf[BaseAssignTargetExpressionMatchType], + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], ] = DoNotCare() for_in: Union[ CompForMatchType, @@ -10652,7 +13043,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -10676,7 +13067,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -10702,7 +13093,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -10732,7 +13123,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -10756,7 +13147,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -10782,7 +13173,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, BaseMatcherNode ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -10795,9 +13186,7 @@ class SetComp(BaseComp, BaseExpression, BaseSet, BaseSimpleComp, 
BaseMatcherNode BaseSmallStatementMatchType = Union[ - "BaseSmallStatement", - MetadataMatchType, - MatchIfTrue[Callable[[cst.BaseSmallStatement], bool]], + "BaseSmallStatement", MetadataMatchType, MatchIfTrue[cst.BaseSmallStatement] ] @@ -10829,7 +13218,7 @@ class SimpleStatementLine(BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseSmallStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseSmallStatement]], OneOf[ Union[ Sequence[ @@ -10853,7 +13242,7 @@ class SimpleStatementLine(BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseSmallStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], AllOf[ @@ -10879,7 +13268,7 @@ class SimpleStatementLine(BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseSmallStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], ] = DoNotCare() @@ -10909,7 +13298,7 @@ class SimpleStatementLine(BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -10933,7 +13322,7 @@ class SimpleStatementLine(BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -10959,7 +13348,7 @@ class SimpleStatementLine(BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -11005,7 +13394,7 @@ class SimpleStatementSuite(BaseSuite, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseSmallStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseSmallStatement]], OneOf[ Union[ Sequence[ @@ -11029,7 +13418,7 @@ class SimpleStatementSuite(BaseSuite, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseSmallStatement]], bool]], + 
MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], AllOf[ @@ -11055,7 +13444,7 @@ class SimpleStatementSuite(BaseSuite, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseSmallStatement]], bool]], + MatchIfTrue[Sequence[cst.BaseSmallStatement]], ] ], ] = DoNotCare() @@ -11110,7 +13499,7 @@ class SimpleString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -11134,7 +13523,7 @@ class SimpleString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -11160,7 +13549,7 @@ class SimpleString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -11190,7 +13579,7 @@ class SimpleString(BaseExpression, BaseString, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -11214,7 +13603,7 @@ class SimpleString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -11240,7 +13629,7 @@ class SimpleString(BaseExpression, BaseString, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -11268,22 +13657,64 @@ class SimpleWhitespace(BaseParenthesizableWhitespace, BaseMatcherNode): @dataclass(frozen=True, eq=False, unsafe_hash=False) class Slice(BaseSlice, BaseMatcherNode): lower: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], 
- AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() upper: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() step: Union[ - BaseExpressionOrNoneMatchType, + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], DoNotCareSentinel, - OneOf[BaseExpressionOrNoneMatchType], - AllOf[BaseExpressionOrNoneMatchType], + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], ] = DoNotCare() first_colon: Union[ ColonMatchType, DoNotCareSentinel, OneOf[ColonMatchType], AllOf[ColonMatchType] @@ -11325,7 +13756,7 @@ class StarredDictElement(BaseDictElement, BaseMatcherNode): @dataclass(frozen=True, eq=False, unsafe_hash=False) -class StarredElement(BaseElement, BaseMatcherNode): +class StarredElement(BaseElement, BaseExpression, BaseMatcherNode): value: Union[ BaseExpressionMatchType, DoNotCareSentinel, @@ -11361,7 +13792,7 @@ class StarredElement(BaseElement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -11385,7 +13816,7 @@ class 
StarredElement(BaseElement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -11411,7 +13842,7 @@ class StarredElement(BaseElement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -11441,7 +13872,7 @@ class StarredElement(BaseElement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -11465,7 +13896,7 @@ class StarredElement(BaseElement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -11491,7 +13922,7 @@ class StarredElement(BaseElement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -11510,9 +13941,7 @@ class StarredElement(BaseElement, BaseMatcherNode): SubscriptElementMatchType = Union[ - "SubscriptElement", - MetadataMatchType, - MatchIfTrue[Callable[[cst.SubscriptElement], bool]], + "SubscriptElement", MetadataMatchType, MatchIfTrue[cst.SubscriptElement] ] @@ -11552,7 +13981,7 @@ class Subscript( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.SubscriptElement]], bool]], + MatchIfTrue[Sequence[cst.SubscriptElement]], OneOf[ Union[ Sequence[ @@ -11576,7 +14005,7 @@ class Subscript( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.SubscriptElement]], bool]], + MatchIfTrue[Sequence[cst.SubscriptElement]], ] ], AllOf[ @@ -11602,7 +14031,7 @@ class Subscript( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.SubscriptElement]], bool]], + MatchIfTrue[Sequence[cst.SubscriptElement]], ] ], ] = DoNotCare() @@ -11644,7 +14073,7 @@ class Subscript( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], 
OneOf[ Union[ Sequence[ @@ -11668,7 +14097,7 @@ class Subscript( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -11694,7 +14123,7 @@ class Subscript( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -11724,7 +14153,7 @@ class Subscript( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -11748,7 +14177,7 @@ class Subscript( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -11774,7 +14203,7 @@ class Subscript( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -11792,9 +14221,7 @@ class Subscript( ] = DoNotCare() -BaseSliceMatchType = Union[ - "BaseSlice", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseSlice], bool]] -] +BaseSliceMatchType = Union["BaseSlice", MetadataMatchType, MatchIfTrue[cst.BaseSlice]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -11860,6 +14287,375 @@ class SubtractAssign(BaseAugOp, BaseMatcherNode): ] = DoNotCare() +BaseTemplatedStringContentMatchType = Union[ + "BaseTemplatedStringContent", + MetadataMatchType, + MatchIfTrue[cst.BaseTemplatedStringContent], +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TemplatedString(BaseExpression, BaseString, BaseMatcherNode): + parts: Union[ + Sequence[ + Union[ + BaseTemplatedStringContentMatchType, + DoNotCareSentinel, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + AtLeastN[ + Union[ + BaseTemplatedStringContentMatchType, + DoNotCareSentinel, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + ] + ], + AtMostN[ + Union[ + BaseTemplatedStringContentMatchType, + DoNotCareSentinel, 
+ OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.BaseTemplatedStringContent]], + OneOf[ + Union[ + Sequence[ + Union[ + BaseTemplatedStringContentMatchType, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + AtLeastN[ + Union[ + BaseTemplatedStringContentMatchType, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + ] + ], + AtMostN[ + Union[ + BaseTemplatedStringContentMatchType, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.BaseTemplatedStringContent]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + BaseTemplatedStringContentMatchType, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + AtLeastN[ + Union[ + BaseTemplatedStringContentMatchType, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + ] + ], + AtMostN[ + Union[ + BaseTemplatedStringContentMatchType, + OneOf[BaseTemplatedStringContentMatchType], + AllOf[BaseTemplatedStringContentMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.BaseTemplatedStringContent]], + ] + ], + ] = DoNotCare() + start: Union[ + strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] + ] = DoNotCare() + end: Union[ + Literal['"', "'", '"""', "'''"], + MetadataMatchType, + MatchIfTrue[Literal['"', "'", '"""', "'''"]], + DoNotCareSentinel, + OneOf[ + Union[ + Literal['"', "'", '"""', "'''"], + MetadataMatchType, + MatchIfTrue[Literal['"', "'", '"""', "'''"]], + ] + ], + AllOf[ + Union[ + Literal['"', "'", '"""', "'''"], + MetadataMatchType, + MatchIfTrue[Literal['"', "'", '"""', "'''"]], + ] + ], + ] = DoNotCare() + lpar: Union[ + Sequence[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + 
AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.LeftParen]], + OneOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + AtLeastN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + AtMostN[ + Union[ + LeftParenMatchType, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.LeftParen]], + ] + ], + ] = DoNotCare() + rpar: Union[ + Sequence[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.RightParen]], + OneOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + 
], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + AtLeastN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + AtMostN[ + Union[ + RightParenMatchType, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.RightParen]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TemplatedStringExpression(BaseTemplatedStringContent, BaseMatcherNode): + expression: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], + ] = DoNotCare() + conversion: Union[ + Optional[str], + MetadataMatchType, + MatchIfTrue[Optional[str]], + DoNotCareSentinel, + OneOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + AllOf[Union[Optional[str], MetadataMatchType, MatchIfTrue[Optional[str]]]], + ] = DoNotCare() + format_spec: Union[ + Optional[Sequence["BaseTemplatedStringContent"]], + MetadataMatchType, + MatchIfTrue[Optional[Sequence[cst.BaseTemplatedStringContent]]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional[Sequence["BaseTemplatedStringContent"]], + MetadataMatchType, + MatchIfTrue[Optional[Sequence[cst.BaseTemplatedStringContent]]], + ] + ], + AllOf[ + Union[ + Optional[Sequence["BaseTemplatedStringContent"]], + MetadataMatchType, + MatchIfTrue[Optional[Sequence[cst.BaseTemplatedStringContent]]], + ] + ], + ] = DoNotCare() + whitespace_before_expression: Union[ + BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_expression: Union[ + 
BaseParenthesizableWhitespaceMatchType, + DoNotCareSentinel, + OneOf[BaseParenthesizableWhitespaceMatchType], + AllOf[BaseParenthesizableWhitespaceMatchType], + ] = DoNotCare() + equal: Union[ + Optional["AssignEqual"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AssignEqual]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["AssignEqual"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AssignEqual]], + ] + ], + AllOf[ + Union[ + Optional["AssignEqual"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AssignEqual]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TemplatedStringText(BaseTemplatedStringContent, BaseMatcherNode): + value: Union[ + strMatchType, DoNotCareSentinel, OneOf[strMatchType], AllOf[strMatchType] + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + @dataclass(frozen=True, eq=False, unsafe_hash=False) class TrailingWhitespace(BaseMatcherNode): whitespace: Union[ @@ -11869,10 +14665,24 @@ class TrailingWhitespace(BaseMatcherNode): AllOf[SimpleWhitespaceMatchType], ] = DoNotCare() comment: Union[ - CommentOrNoneMatchType, + Optional["Comment"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comment]], DoNotCareSentinel, - OneOf[CommentOrNoneMatchType], - AllOf[CommentOrNoneMatchType], + OneOf[ + Union[ + Optional["Comment"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comment]], + ] + ], + AllOf[ + Union[ + Optional["Comment"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Comment]], + ] + ], ] = DoNotCare() newline: Union[ NewlineMatchType, @@ -11889,13 +14699,7 @@ class TrailingWhitespace(BaseMatcherNode): ExceptHandlerMatchType = Union[ - "ExceptHandler", MetadataMatchType, MatchIfTrue[Callable[[cst.ExceptHandler], bool]] -] -FinallyOrNoneMatchType = 
Union[ - "Finally", - None, - MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.Finally, None]], bool]], + "ExceptHandler", MetadataMatchType, MatchIfTrue[cst.ExceptHandler] ] @@ -11933,7 +14737,7 @@ class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.ExceptHandler]], bool]], + MatchIfTrue[Sequence[cst.ExceptHandler]], OneOf[ Union[ Sequence[ @@ -11957,7 +14761,7 @@ class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ExceptHandler]], bool]], + MatchIfTrue[Sequence[cst.ExceptHandler]], ] ], AllOf[ @@ -11983,21 +14787,41 @@ class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.ExceptHandler]], bool]], + MatchIfTrue[Sequence[cst.ExceptHandler]], ] ], ] = DoNotCare() orelse: Union[ - ElseOrNoneMatchType, + Optional["Else"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, - OneOf[ElseOrNoneMatchType], - AllOf[ElseOrNoneMatchType], + OneOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], + AllOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], ] = DoNotCare() finalbody: Union[ - FinallyOrNoneMatchType, + Optional["Finally"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Finally]], DoNotCareSentinel, - OneOf[FinallyOrNoneMatchType], - AllOf[FinallyOrNoneMatchType], + OneOf[ + Union[ + Optional["Finally"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Finally]], + ] + ], + AllOf[ + Union[ + Optional["Finally"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Finally]], + ] + ], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -12025,7 +14849,7 @@ class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -12049,7 +14873,7 @@ 
class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -12075,7 +14899,226 @@ class Try(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + ] = DoNotCare() + whitespace_before_colon: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +ExceptStarHandlerMatchType = Union[ + "ExceptStarHandler", MetadataMatchType, MatchIfTrue[cst.ExceptStarHandler] +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TryStar(BaseCompoundStatement, BaseStatement, BaseMatcherNode): + body: Union[ + BaseSuiteMatchType, + DoNotCareSentinel, + OneOf[BaseSuiteMatchType], + AllOf[BaseSuiteMatchType], + ] = DoNotCare() + handlers: Union[ + Sequence[ + Union[ + ExceptStarHandlerMatchType, + DoNotCareSentinel, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + AtLeastN[ + Union[ + ExceptStarHandlerMatchType, + DoNotCareSentinel, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + ] + ], + AtMostN[ + Union[ + ExceptStarHandlerMatchType, + DoNotCareSentinel, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.ExceptStarHandler]], + OneOf[ + Union[ + Sequence[ + Union[ + ExceptStarHandlerMatchType, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + AtLeastN[ + Union[ + ExceptStarHandlerMatchType, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + ] + ], + AtMostN[ + Union[ + ExceptStarHandlerMatchType, + 
OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.ExceptStarHandler]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + ExceptStarHandlerMatchType, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + AtLeastN[ + Union[ + ExceptStarHandlerMatchType, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + ] + ], + AtMostN[ + Union[ + ExceptStarHandlerMatchType, + OneOf[ExceptStarHandlerMatchType], + AllOf[ExceptStarHandlerMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.ExceptStarHandler]], + ] + ], + ] = DoNotCare() + orelse: Union[ + Optional["Else"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Else]], + DoNotCareSentinel, + OneOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], + AllOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], + ] = DoNotCare() + finalbody: Union[ + Optional["Finally"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Finally]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["Finally"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Finally]], + ] + ], + AllOf[ + Union[ + Optional["Finally"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Finally]], + ] + ], + ] = DoNotCare() + leading_lines: Union[ + Sequence[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + DoNotCareSentinel, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.EmptyLine]], + OneOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + 
AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + AtLeastN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + AtMostN[ + Union[ + EmptyLineMatchType, + OneOf[EmptyLineMatchType], + AllOf[EmptyLineMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -12123,7 +15166,7 @@ class Tuple( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], OneOf[ Union[ Sequence[ @@ -12147,7 +15190,7 @@ class Tuple( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], ] ], AllOf[ @@ -12173,7 +15216,7 @@ class Tuple( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.BaseElement]], bool]], + MatchIfTrue[Sequence[cst.BaseElement]], ] ], ] = DoNotCare() @@ -12203,7 +15246,7 @@ class Tuple( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -12227,7 +15270,7 @@ class Tuple( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -12253,7 +15296,7 @@ class Tuple( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -12283,7 +15326,7 @@ class Tuple( ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -12307,7 +15350,7 @@ class Tuple( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -12333,7 +15376,7 @@ class 
Tuple( ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -12345,8 +15388,309 @@ class Tuple( ] = DoNotCare() +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TypeAlias(BaseSmallStatement, BaseMatcherNode): + name: Union[ + NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] + ] = DoNotCare() + value: Union[ + BaseExpressionMatchType, + DoNotCareSentinel, + OneOf[BaseExpressionMatchType], + AllOf[BaseExpressionMatchType], + ] = DoNotCare() + type_parameters: Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + ] + ], + AllOf[ + Union[ + Optional["TypeParameters"], + MetadataMatchType, + MatchIfTrue[Optional[cst.TypeParameters]], + ] + ], + ] = DoNotCare() + whitespace_after_type: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_name: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_type_parameters: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + whitespace_after_equals: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + semicolon: Union[ + SemicolonMatchType, + DoNotCareSentinel, + OneOf[SemicolonMatchType], + AllOf[SemicolonMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +TypeVarOrTypeVarTupleOrParamSpecMatchType = Union[ 
+ "TypeVar", + "TypeVarTuple", + "ParamSpec", + MetadataMatchType, + MatchIfTrue[Union[cst.TypeVar, cst.TypeVarTuple, cst.ParamSpec]], +] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TypeParam(BaseMatcherNode): + param: Union[ + TypeVarOrTypeVarTupleOrParamSpecMatchType, + DoNotCareSentinel, + OneOf[TypeVarOrTypeVarTupleOrParamSpecMatchType], + AllOf[TypeVarOrTypeVarTupleOrParamSpecMatchType], + ] = DoNotCare() + comma: Union[ + CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] + ] = DoNotCare() + equal: Union[ + AssignEqualMatchType, + DoNotCareSentinel, + OneOf[AssignEqualMatchType], + AllOf[AssignEqualMatchType], + ] = DoNotCare() + star: Union[ + Literal["", "*"], + MetadataMatchType, + MatchIfTrue[Literal["", "*"]], + DoNotCareSentinel, + OneOf[ + Union[Literal["", "*"], MetadataMatchType, MatchIfTrue[Literal["", "*"]]] + ], + AllOf[ + Union[Literal["", "*"], MetadataMatchType, MatchIfTrue[Literal["", "*"]]] + ], + ] = DoNotCare() + whitespace_after_star: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + default: Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + DoNotCareSentinel, + OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +TypeParamMatchType = Union["TypeParam", MetadataMatchType, MatchIfTrue[cst.TypeParam]] + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TypeParameters(BaseMatcherNode): + params: Union[ + Sequence[ + Union[ + TypeParamMatchType, + DoNotCareSentinel, + 
OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + AtLeastN[ + Union[ + TypeParamMatchType, + DoNotCareSentinel, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + ] + ], + AtMostN[ + Union[ + TypeParamMatchType, + DoNotCareSentinel, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + ] + ], + ] + ], + DoNotCareSentinel, + MatchIfTrue[Sequence[cst.TypeParam]], + OneOf[ + Union[ + Sequence[ + Union[ + TypeParamMatchType, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + AtLeastN[ + Union[ + TypeParamMatchType, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + ] + ], + AtMostN[ + Union[ + TypeParamMatchType, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.TypeParam]], + ] + ], + AllOf[ + Union[ + Sequence[ + Union[ + TypeParamMatchType, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + AtLeastN[ + Union[ + TypeParamMatchType, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + ] + ], + AtMostN[ + Union[ + TypeParamMatchType, + OneOf[TypeParamMatchType], + AllOf[TypeParamMatchType], + ] + ], + ] + ], + MatchIfTrue[Sequence[cst.TypeParam]], + ] + ], + ] = DoNotCare() + lbracket: Union[ + LeftSquareBracketMatchType, + DoNotCareSentinel, + OneOf[LeftSquareBracketMatchType], + AllOf[LeftSquareBracketMatchType], + ] = DoNotCare() + rbracket: Union[ + RightSquareBracketMatchType, + DoNotCareSentinel, + OneOf[RightSquareBracketMatchType], + AllOf[RightSquareBracketMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TypeVar(BaseMatcherNode): + name: Union[ + NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] + ] = DoNotCare() + bound: Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + DoNotCareSentinel, + 
OneOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + AllOf[ + Union[ + Optional["BaseExpression"], + MetadataMatchType, + MatchIfTrue[Optional[cst.BaseExpression]], + ] + ], + ] = DoNotCare() + colon: Union[ + ColonMatchType, DoNotCareSentinel, OneOf[ColonMatchType], AllOf[ColonMatchType] + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + +@dataclass(frozen=True, eq=False, unsafe_hash=False) +class TypeVarTuple(BaseMatcherNode): + name: Union[ + NameMatchType, DoNotCareSentinel, OneOf[NameMatchType], AllOf[NameMatchType] + ] = DoNotCare() + whitespace_after_star: Union[ + SimpleWhitespaceMatchType, + DoNotCareSentinel, + OneOf[SimpleWhitespaceMatchType], + AllOf[SimpleWhitespaceMatchType], + ] = DoNotCare() + metadata: Union[ + MetadataMatchType, + DoNotCareSentinel, + OneOf[MetadataMatchType], + AllOf[MetadataMatchType], + ] = DoNotCare() + + BaseUnaryOpMatchType = Union[ - "BaseUnaryOp", MetadataMatchType, MatchIfTrue[Callable[[cst.BaseUnaryOp], bool]] + "BaseUnaryOp", MetadataMatchType, MatchIfTrue[cst.BaseUnaryOp] ] @@ -12390,7 +15734,7 @@ class UnaryOperation(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -12414,7 +15758,7 @@ class UnaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -12440,7 +15784,7 @@ class UnaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -12470,7 +15814,7 @@ class UnaryOperation(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.RightParen]], 
bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -12494,7 +15838,7 @@ class UnaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -12520,7 +15864,7 @@ class UnaryOperation(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -12547,10 +15891,16 @@ class While(BaseCompoundStatement, BaseStatement, BaseMatcherNode): AllOf[BaseSuiteMatchType], ] = DoNotCare() orelse: Union[ - ElseOrNoneMatchType, + Optional["Else"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Else]], DoNotCareSentinel, - OneOf[ElseOrNoneMatchType], - AllOf[ElseOrNoneMatchType], + OneOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], + AllOf[ + Union[Optional["Else"], MetadataMatchType, MatchIfTrue[Optional[cst.Else]]] + ], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -12578,7 +15928,7 @@ class While(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -12602,7 +15952,7 @@ class While(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -12628,7 +15978,7 @@ class While(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() @@ -12652,9 +16002,7 @@ class While(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] = DoNotCare() -WithItemMatchType = Union[ - "WithItem", MetadataMatchType, MatchIfTrue[Callable[[cst.WithItem], bool]] -] +WithItemMatchType = Union["WithItem", MetadataMatchType, 
MatchIfTrue[cst.WithItem]] @dataclass(frozen=True, eq=False, unsafe_hash=False) @@ -12685,7 +16033,7 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.WithItem]], bool]], + MatchIfTrue[Sequence[cst.WithItem]], OneOf[ Union[ Sequence[ @@ -12709,7 +16057,7 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.WithItem]], bool]], + MatchIfTrue[Sequence[cst.WithItem]], ] ], AllOf[ @@ -12735,7 +16083,7 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.WithItem]], bool]], + MatchIfTrue[Sequence[cst.WithItem]], ] ], ] = DoNotCare() @@ -12746,10 +16094,24 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): AllOf[BaseSuiteMatchType], ] = DoNotCare() asynchronous: Union[ - AsynchronousOrNoneMatchType, + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], DoNotCareSentinel, - OneOf[AsynchronousOrNoneMatchType], - AllOf[AsynchronousOrNoneMatchType], + OneOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], + AllOf[ + Union[ + Optional["Asynchronous"], + MetadataMatchType, + MatchIfTrue[Optional[cst.Asynchronous]], + ] + ], ] = DoNotCare() leading_lines: Union[ Sequence[ @@ -12777,7 +16139,7 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], OneOf[ Union[ Sequence[ @@ -12801,7 +16163,7 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], AllOf[ @@ -12827,10 +16189,22 @@ class With(BaseCompoundStatement, BaseStatement, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.EmptyLine]], 
bool]], + MatchIfTrue[Sequence[cst.EmptyLine]], ] ], ] = DoNotCare() + lpar: Union[ + LeftParenMatchType, + DoNotCareSentinel, + OneOf[LeftParenMatchType], + AllOf[LeftParenMatchType], + ] = DoNotCare() + rpar: Union[ + RightParenMatchType, + DoNotCareSentinel, + OneOf[RightParenMatchType], + AllOf[RightParenMatchType], + ] = DoNotCare() whitespace_after_with: Union[ SimpleWhitespaceMatchType, DoNotCareSentinel, @@ -12860,10 +16234,20 @@ class WithItem(BaseMatcherNode): AllOf[BaseExpressionMatchType], ] = DoNotCare() asname: Union[ - AsNameOrNoneMatchType, + Optional["AsName"], + MetadataMatchType, + MatchIfTrue[Optional[cst.AsName]], DoNotCareSentinel, - OneOf[AsNameOrNoneMatchType], - AllOf[AsNameOrNoneMatchType], + OneOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], + AllOf[ + Union[ + Optional["AsName"], MetadataMatchType, MatchIfTrue[Optional[cst.AsName]] + ] + ], ] = DoNotCare() comma: Union[ CommaMatchType, DoNotCareSentinel, OneOf[CommaMatchType], AllOf[CommaMatchType] @@ -12881,7 +16265,7 @@ BaseExpressionOrFromOrNoneMatchType = Union[ "From", None, MetadataMatchType, - MatchIfTrue[Callable[[Union[cst.BaseExpression, cst.From, None]], bool]], + MatchIfTrue[Union[cst.BaseExpression, cst.From, None]], ] @@ -12919,7 +16303,7 @@ class Yield(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], OneOf[ Union[ Sequence[ @@ -12943,7 +16327,7 @@ class Yield(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], AllOf[ @@ -12969,7 +16353,7 @@ class Yield(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.LeftParen]], bool]], + MatchIfTrue[Sequence[cst.LeftParen]], ] ], ] = DoNotCare() @@ -12999,7 +16383,7 @@ class Yield(BaseExpression, BaseMatcherNode): ] ], DoNotCareSentinel, - 
MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], OneOf[ Union[ Sequence[ @@ -13023,7 +16407,7 @@ class Yield(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], AllOf[ @@ -13049,7 +16433,7 @@ class Yield(BaseExpression, BaseMatcherNode): ], ] ], - MatchIfTrue[Callable[[Sequence[cst.RightParen]], bool]], + MatchIfTrue[Sequence[cst.RightParen]], ] ], ] = DoNotCare() @@ -13111,6 +16495,7 @@ __all__ = [ "BaseStatement", "BaseString", "BaseSuite", + "BaseTemplatedStringContent", "BaseUnaryOp", "BinaryOperation", "BitAnd", @@ -13150,6 +16535,7 @@ __all__ = [ "EmptyLine", "Equal", "ExceptHandler", + "ExceptStarHandler", "Expr", "Finally", "Float", @@ -13188,11 +16574,28 @@ __all__ = [ "LessThanEqual", "List", "ListComp", + "Match", + "MatchAs", + "MatchCase", + "MatchClass", "MatchDecoratorMismatch", "MatchIfTrue", + "MatchKeywordElement", + "MatchList", + "MatchMapping", + "MatchMappingElement", "MatchMetadata", "MatchMetadataIfTrue", + "MatchOr", + "MatchOrElement", + "MatchPattern", "MatchRegex", + "MatchSequence", + "MatchSequenceElement", + "MatchSingleton", + "MatchStar", + "MatchTuple", + "MatchValue", "MatcherDecoratableTransformer", "MatcherDecoratableVisitor", "MatrixMultiply", @@ -13215,6 +16618,7 @@ __all__ = [ "Or", "Param", "ParamSlash", + "ParamSpec", "ParamStar", "Parameters", "ParenthesizedWhitespace", @@ -13244,10 +16648,19 @@ __all__ = [ "SubscriptElement", "Subtract", "SubtractAssign", + "TemplatedString", + "TemplatedStringExpression", + "TemplatedStringText", "TrailingWhitespace", "Try", + "TryStar", "Tuple", + "TypeAlias", "TypeOf", + "TypeParam", + "TypeParameters", + "TypeVar", + "TypeVarTuple", "UnaryOperation", "While", "With", diff --git a/libcst/matchers/_decorators.py b/libcst/matchers/_decorators.py index 7dd2e741..ea69178f 100644 --- a/libcst/matchers/_decorators.py +++ 
b/libcst/matchers/_decorators.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,7 +7,6 @@ from typing import Callable, TypeVar from libcst.matchers._matcher_base import BaseMatcherNode - _CSTVisitFuncT = TypeVar("_CSTVisitFuncT") @@ -19,6 +18,7 @@ CONSTRUCTED_LEAVE_MATCHER_ATTR: str = "_leave_matcher" def call_if_inside( matcher: BaseMatcherNode, + # pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. ) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator for visit and leave methods inside a :class:`MatcherDecoratableTransformer` @@ -30,12 +30,10 @@ def call_if_inside( """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: - if not hasattr(original, VISIT_POSITIVE_MATCHER_ATTR): - setattr(original, VISIT_POSITIVE_MATCHER_ATTR, []) setattr( original, VISIT_POSITIVE_MATCHER_ATTR, - [*getattr(original, VISIT_POSITIVE_MATCHER_ATTR), matcher], + [*getattr(original, VISIT_POSITIVE_MATCHER_ATTR, []), matcher], ) return original @@ -44,6 +42,7 @@ def call_if_inside( def call_if_not_inside( matcher: BaseMatcherNode, + # pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. 
) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator for visit and leave methods inside a :class:`MatcherDecoratableTransformer` @@ -56,18 +55,17 @@ def call_if_not_inside( """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: - if not hasattr(original, VISIT_NEGATIVE_MATCHER_ATTR): - setattr(original, VISIT_NEGATIVE_MATCHER_ATTR, []) setattr( original, VISIT_NEGATIVE_MATCHER_ATTR, - [*getattr(original, VISIT_NEGATIVE_MATCHER_ATTR), matcher], + [*getattr(original, VISIT_NEGATIVE_MATCHER_ATTR, []), matcher], ) return original return inner +# pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. def visit(matcher: BaseMatcherNode) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator that allows a method inside a :class:`MatcherDecoratableTransformer` @@ -86,18 +84,17 @@ def visit(matcher: BaseMatcherNode) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: - if not hasattr(original, CONSTRUCTED_VISIT_MATCHER_ATTR): - setattr(original, CONSTRUCTED_VISIT_MATCHER_ATTR, []) setattr( original, CONSTRUCTED_VISIT_MATCHER_ATTR, - [*getattr(original, CONSTRUCTED_VISIT_MATCHER_ATTR), matcher], + [*getattr(original, CONSTRUCTED_VISIT_MATCHER_ATTR, []), matcher], ) return original return inner +# pyre-fixme[34]: `Variable[_CSTVisitFuncT]` isn't present in the function's parameters. 
def leave(matcher: BaseMatcherNode) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT]: """ A decorator that allows a method inside a :class:`MatcherDecoratableTransformer` @@ -113,12 +110,10 @@ def leave(matcher: BaseMatcherNode) -> Callable[[_CSTVisitFuncT], _CSTVisitFuncT """ def inner(original: _CSTVisitFuncT) -> _CSTVisitFuncT: - if not hasattr(original, CONSTRUCTED_LEAVE_MATCHER_ATTR): - setattr(original, CONSTRUCTED_LEAVE_MATCHER_ATTR, []) setattr( original, CONSTRUCTED_LEAVE_MATCHER_ATTR, - [*getattr(original, CONSTRUCTED_LEAVE_MATCHER_ATTR), matcher], + [*getattr(original, CONSTRUCTED_LEAVE_MATCHER_ATTR, []), matcher], ) return original diff --git a/libcst/matchers/_matcher_base.py b/libcst/matchers/_matcher_base.py index 70a9340a..1727f0df 100644 --- a/libcst/matchers/_matcher_base.py +++ b/libcst/matchers/_matcher_base.py @@ -1,17 +1,17 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
import collections.abc -import copy import inspect import re from abc import ABCMeta from dataclasses import dataclass, fields -from enum import Enum, auto +from enum import auto, Enum from typing import ( Callable, + cast, Dict, Generic, Iterator, @@ -25,12 +25,12 @@ from typing import ( Type, TypeVar, Union, - cast, ) import libcst import libcst.metadata as meta -from libcst import MaybeSentinel, RemovalSentinel +from libcst import CSTLogicError, FlattenSentinel, MaybeSentinel, RemovalSentinel +from libcst._metadata_dependent import LazyValue class DoNotCareSentinel(Enum): @@ -49,7 +49,7 @@ class DoNotCareSentinel(Enum): _MatcherT = TypeVar("_MatcherT", covariant=True) -_CallableT = TypeVar("_CallableT", bound="Callable", covariant=True) +_MatchIfTrueT = TypeVar("_MatchIfTrueT", covariant=True) _BaseMatcherNodeSelfT = TypeVar("_BaseMatcherNodeSelfT", bound="BaseMatcherNode") _OtherNodeT = TypeVar("_OtherNodeT") _MetadataValueT = TypeVar("_MetadataValueT") @@ -69,6 +69,7 @@ class AbstractBaseMatcherNodeMeta(ABCMeta): matcher. """ + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, node: Type["BaseMatcherNode"]) -> "TypeOf[Type[BaseMatcherNode]]": return TypeOf(self, node) @@ -82,23 +83,16 @@ class BaseMatcherNode: several concrete matchers as options. """ + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__( self: _BaseMatcherNodeSelfT, other: _OtherNodeT ) -> "OneOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]]": - # Without a cast, pyre thinks that the below OneOf is type OneOf[object] - # even though it has the types passed into it. 
- return cast( - OneOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]], OneOf(self, other) - ) + return OneOf(self, other) def __and__( self: _BaseMatcherNodeSelfT, other: _OtherNodeT ) -> "AllOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]]": - # Without a cast, pyre thinks that the below AllOf is type AllOf[object] - # even though it has the types passed into it. - return cast( - AllOf[Union[_BaseMatcherNodeSelfT, _OtherNodeT]], AllOf(self, other) - ) + return AllOf(self, other) def __invert__(self: _BaseMatcherNodeSelfT) -> "_BaseMatcherNodeSelfT": return cast(_BaseMatcherNodeSelfT, _InverseOf(self)) @@ -149,7 +143,7 @@ class TypeOf(Generic[_MatcherTypeT], BaseMatcherNode): for option in options: if isinstance(option, TypeOf): if option.initalized: - raise Exception( + raise ValueError( "Cannot chain an uninitalized TypeOf with an initalized one" ) actual_options.extend(option._raw_options) @@ -176,11 +170,14 @@ class TypeOf(Generic[_MatcherTypeT], BaseMatcherNode): self._call_items = (args, kwargs) return self + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__( self, other: _OtherNodeMatcherTypeT ) -> "TypeOf[Union[_MatcherTypeT, _OtherNodeMatcherTypeT]]": return TypeOf[Union[_MatcherTypeT, _OtherNodeMatcherTypeT]](self, other) + # pyre-fixme[14]: `__and__` overrides method defined in `BaseMatcherNode` + # inconsistently. def __and__(self, other: _OtherNodeMatcherTypeT) -> NoReturn: left, right = type(self).__name__, other.__name__ raise TypeError( @@ -210,25 +207,14 @@ class OneOf(Generic[_MatcherT], BaseMatcherNode): m.Name("True") | m.Name("False") - Note that a :class:`OneOf` matcher can be used anywhere you are defining - a matcher attribute. So, an alternate form to the first example looks like:: - - m.Name(m.OneOf("True", "False")) - - A downside to the alternate form is that you can no longer use Python's - bitwise or operator to construct the :class:`OneOf` since it is not defined - for strings. 
However, the upside is that it is more concise. We do not - recommend any one form over the other, and leave it up to you to decide what - is best for your use case. - """ def __init__(self, *options: Union[_MatcherT, "OneOf[_MatcherT]"]) -> None: actual_options: List[_MatcherT] = [] for option in options: if isinstance(option, AllOf): - raise Exception("Cannot use AllOf and OneOf in combination!") - elif isinstance(option, OneOf): + raise ValueError("Cannot use AllOf and OneOf in combination!") + elif isinstance(option, (OneOf, TypeOf)): actual_options.extend(option.options) else: actual_options.append(option) @@ -243,17 +229,16 @@ class OneOf(Generic[_MatcherT], BaseMatcherNode): """ return self._options + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]": - # Without a cast, pyre thinks that the below OneOf is type OneOf[object] - # even though it has the types passed into it. - return cast(OneOf[Union[_MatcherT, _OtherNodeT]], OneOf(self, other)) + return OneOf(self, other) def __and__(self, other: _OtherNodeT) -> NoReturn: - raise Exception("Cannot use AllOf and OneOf in combination!") + raise ValueError("Cannot use AllOf and OneOf in combination!") def __invert__(self) -> "AllOf[_MatcherT]": # Invert using De Morgan's Law so we don't have to complicate types. 
- return cast(AllOf[_MatcherT], AllOf(*[DoesNotMatch(m) for m in self._options])) + return AllOf(*[DoesNotMatch(m) for m in self._options]) def __repr__(self) -> str: return f"OneOf({', '.join([repr(o) for o in self._options])})" @@ -301,7 +286,9 @@ class AllOf(Generic[_MatcherT], BaseMatcherNode): actual_options: List[_MatcherT] = [] for option in options: if isinstance(option, OneOf): - raise Exception("Cannot use AllOf and OneOf in combination!") + raise ValueError("Cannot use AllOf and OneOf in combination!") + elif isinstance(option, TypeOf): + raise ValueError("Cannot use AllOf and TypeOf in combination!") elif isinstance(option, AllOf): actual_options.extend(option.options) else: @@ -317,17 +304,16 @@ class AllOf(Generic[_MatcherT], BaseMatcherNode): """ return self._options + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> NoReturn: - raise Exception("Cannot use AllOf and OneOf in combination!") + raise ValueError("Cannot use AllOf and OneOf in combination!") def __and__(self, other: _OtherNodeT) -> "AllOf[Union[_MatcherT, _OtherNodeT]]": - # Without a cast, pyre thinks that the below AllOf is type AllOf[object] - # even though it has the types passed into it. - return cast(AllOf[Union[_MatcherT, _OtherNodeT]], AllOf(self, other)) + return AllOf(self, other) def __invert__(self) -> "OneOf[_MatcherT]": # Invert using De Morgan's Law so we don't have to complicate types. - return cast(OneOf[_MatcherT], OneOf(*[DoesNotMatch(m) for m in self._options])) + return OneOf(*[DoesNotMatch(m) for m in self._options]) def __repr__(self) -> str: return f"AllOf({', '.join([repr(o) for o in self._options])})" @@ -364,6 +350,7 @@ class _InverseOf(Generic[_MatcherT]): """ return self._matcher + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. 
def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]": # Without a cast, pyre thinks that the below OneOf is type OneOf[object] # even though it has the types passed into it. @@ -433,6 +420,7 @@ class _ExtractMatchingNode(Generic[_MatcherT]): """ return self._name + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__(self, other: _OtherNodeT) -> "OneOf[Union[_MatcherT, _OtherNodeT]]": # Without a cast, pyre thinks that the below OneOf is type OneOf[object] # even though it has the types passed into it. @@ -443,7 +431,7 @@ class _ExtractMatchingNode(Generic[_MatcherT]): # that are captured with an and, either all of them will be assigned the # same node, or none of them. It makes more sense to move the SaveMatchedNode # up to wrap the AllOf. - raise Exception( + raise ValueError( ( "Cannot use AllOf with SavedMatchedNode children! Instead, you should " + "use SaveMatchedNode(AllOf(options...))." @@ -459,10 +447,10 @@ class _ExtractMatchingNode(Generic[_MatcherT]): def __invert__(self) -> "_MatcherT": # This doesn't make sense. We don't want to capture a node only if it # doesn't match, since this will never capture anything. - raise Exception( + raise ValueError( ( "Cannot invert a SaveMatchedNode. Instead you should wrap SaveMatchedNode " - + "around your inversion itself" + "around your inversion itself" ) ) @@ -472,7 +460,7 @@ class _ExtractMatchingNode(Generic[_MatcherT]): ) -class MatchIfTrue(Generic[_CallableT]): +class MatchIfTrue(Generic[_MatchIfTrueT]): """ Matcher that matches if its child callable returns ``True``. The child callable should take one argument which is the attribute on the LibCST node we are @@ -491,13 +479,13 @@ class MatchIfTrue(Generic[_CallableT]): you are passing to :func:`matches`. """ - def __init__(self, func: _CallableT) -> None: - # Without a cast, pyre thinks that self.func is not a function, even though - # it recognizes that it is a _CallableT bound to Callable. 
- self._func: Callable[[object], bool] = cast(Callable[[object], bool], func) + _func: Callable[[_MatchIfTrueT], bool] + + def __init__(self, func: Callable[[_MatchIfTrueT], bool]) -> None: + self._func = func @property - def func(self) -> Callable[[object], bool]: + def func(self) -> Callable[[_MatchIfTrueT], bool]: """ The function that we will call with a LibCST node in order to determine if we match. If the function returns ``True`` then we consider ourselves @@ -505,35 +493,27 @@ class MatchIfTrue(Generic[_CallableT]): """ return self._func + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. def __or__( self, other: _OtherNodeT - ) -> "OneOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]]": - # Without a cast, pyre thinks that the below OneOf is type OneOf[object] - # even though it has the types passed into it. - return cast( - OneOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]], OneOf(self, other) - ) + ) -> "OneOf[Union[MatchIfTrue[_MatchIfTrueT], _OtherNodeT]]": + return OneOf(self, other) def __and__( self, other: _OtherNodeT - ) -> "AllOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]]": - # Without a cast, pyre thinks that the below AllOf is type AllOf[object] - # even though it has the types passed into it. - return cast( - AllOf[Union[MatchIfTrue[_CallableT], _OtherNodeT]], AllOf(self, other) - ) + ) -> "AllOf[Union[MatchIfTrue[_MatchIfTrueT], _OtherNodeT]]": + return AllOf(self, other) - def __invert__(self) -> "MatchIfTrue[_CallableT]": + def __invert__(self) -> "MatchIfTrue[_MatchIfTrueT]": # Construct a wrapped version of MatchIfTrue for typing simplicity. # Without the cast, pyre doesn't seem to think the lambda is valid. - return MatchIfTrue(cast(_CallableT, lambda val: not self._func(val))) + return MatchIfTrue(lambda val: not self._func(val)) def __repr__(self) -> str: - # pyre-ignore Pyre doesn't believe that functions have a repr. 
return f"MatchIfTrue({repr(self._func)})" -def MatchRegex(regex: Union[str, Pattern[str]]) -> MatchIfTrue[Callable[[str], bool]]: +def MatchRegex(regex: Union[str, Pattern[str]]) -> MatchIfTrue[str]: """ Used as a convenience wrapper to :class:`MatchIfTrue` which allows for matching a string attribute against a regex. ``regex`` can be any regular @@ -552,7 +532,6 @@ def MatchRegex(regex: Union[str, Pattern[str]]) -> MatchIfTrue[Callable[[str], b def _match_func(value: object) -> bool: if isinstance(value, str): - # pyre-ignore Pyre doesn't think a 'Pattern' can be passed to fullmatch. return bool(re.fullmatch(regex, value)) else: return False @@ -572,8 +551,11 @@ class MatchMetadata(_BaseMetadataMatcher): """ Matcher that looks up the metadata on the current node using the provided metadata provider and compares the value on the node against the value provided - to :class:`MatchMetadata`. If the metadata value does not exist for a particular - node, :class:`MatchMetadata` will always be considered not a match. + to :class:`MatchMetadata`. + If the metadata provider is unresolved, a :class:`LookupError` exeption will be + raised and ask you to provide a :class:`~libcst.metadata.MetadataWrapper`. + If the metadata value does not exist for a particular node, :class:`MatchMetadata` + will be considered not a match. For example, to match against any function call which has one parameter which is used in a load expression context:: @@ -631,13 +613,12 @@ class MatchMetadata(_BaseMetadataMatcher): """ return self._value + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. 
def __or__(self, other: _OtherNodeT) -> "OneOf[Union[MatchMetadata, _OtherNodeT]]": - # Without the cast, pyre doesn't know this is valid - return cast(OneOf[Union[MatchMetadata, _OtherNodeT]], OneOf(self, other)) + return OneOf(self, other) def __and__(self, other: _OtherNodeT) -> "AllOf[Union[MatchMetadata, _OtherNodeT]]": - # Without the cast, pyre doesn't know this is valid - return cast(AllOf[Union[MatchMetadata, _OtherNodeT]], AllOf(self, other)) + return AllOf(self, other) def __invert__(self) -> "MatchMetadata": # We intentionally lie here, for the same reason given in the documentation @@ -653,8 +634,10 @@ class MatchMetadataIfTrue(_BaseMetadataMatcher): Matcher that looks up the metadata on the current node using the provided metadata provider and passes it to a callable which can inspect the metadata further, returning ``True`` if the matcher should be considered a match. + If the metadata provider is unresolved, a :class:`LookupError` exeption will be + raised and ask you to provide a :class:`~libcst.metadata.MetadataWrapper`. If the metadata value does not exist for a particular node, - :class:`MatchMetadataIfTrue` will always be considered not a match. + :class:`MatchMetadataIfTrue` will be considered not a match. For example, to match against any arg whose qualified name might be ``typing.Dict``:: @@ -713,24 +696,22 @@ class MatchMetadataIfTrue(_BaseMetadataMatcher): """ return self._func + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. 
def __or__( self, other: _OtherNodeT ) -> "OneOf[Union[MatchMetadataIfTrue, _OtherNodeT]]": - # Without the cast, pyre doesn't know this is valid - return cast(OneOf[Union[MatchMetadataIfTrue, _OtherNodeT]], OneOf(self, other)) + return OneOf(self, other) def __and__( self, other: _OtherNodeT ) -> "AllOf[Union[MatchMetadataIfTrue, _OtherNodeT]]": - # Without the cast, pyre doesn't know this is valid - return cast(AllOf[Union[MatchMetadataIfTrue, _OtherNodeT]], AllOf(self, other)) + return AllOf(self, other) def __invert__(self) -> "MatchMetadataIfTrue": # Construct a wrapped version of MatchMetadataIfTrue for typing simplicity. return MatchMetadataIfTrue(self._key, lambda val: not self._func(val)) def __repr__(self) -> str: - # pyre-ignore Pyre doesn't believe that functions have a repr. return f"MatchMetadataIfTrue(key={repr(self._key)}, func={repr(self._func)})" @@ -780,7 +761,9 @@ class AtLeastN(Generic[_MatcherT], _BaseWildcardNode): n: int, ) -> None: if n < 0: - raise Exception(f"{self.__class__.__name__} n attribute must be positive") + raise ValueError( + f"{self.__class__.__qualname__} n attribute must be positive" + ) self._n: int = n self._matcher: Union[_MatcherT, DoNotCareSentinel] = matcher @@ -801,14 +784,15 @@ class AtLeastN(Generic[_MatcherT], _BaseWildcardNode): """ return self._matcher + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. 
def __or__(self, other: object) -> NoReturn: - raise Exception("AtLeastN cannot be used in a OneOf matcher") + raise ValueError("AtLeastN cannot be used in a OneOf matcher") def __and__(self, other: object) -> NoReturn: - raise Exception("AtLeastN cannot be used in an AllOf matcher") + raise ValueError("AtLeastN cannot be used in an AllOf matcher") def __invert__(self) -> NoReturn: - raise Exception("Cannot invert an AtLeastN matcher!") + raise ValueError("Cannot invert an AtLeastN matcher!") def __repr__(self) -> str: if self._n == 0: @@ -818,7 +802,7 @@ class AtLeastN(Generic[_MatcherT], _BaseWildcardNode): def ZeroOrMore( - matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT + matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT, ) -> AtLeastN[Union[_MatcherT, DoNotCareSentinel]]: """ Used as a convenience wrapper to :class:`AtLeastN` when ``n`` is equal to ``0``. @@ -881,7 +865,9 @@ class AtMostN(Generic[_MatcherT], _BaseWildcardNode): n: int, ) -> None: if n < 0: - raise Exception(f"{self.__class__.__name__} n attribute must be positive") + raise ValueError( + f"{self.__class__.__qualname__} n attribute must be positive" + ) self._n: int = n self._matcher: Union[_MatcherT, DoNotCareSentinel] = matcher @@ -903,14 +889,15 @@ class AtMostN(Generic[_MatcherT], _BaseWildcardNode): """ return self._matcher + # pyre-fixme[15]: `__or__` overrides method defined in `type` inconsistently. 
def __or__(self, other: object) -> NoReturn: - raise Exception("AtMostN cannot be used in a OneOf matcher") + raise ValueError("AtMostN cannot be used in a OneOf matcher") def __and__(self, other: object) -> NoReturn: - raise Exception("AtMostN cannot be used in an AllOf matcher") + raise ValueError("AtMostN cannot be used in an AllOf matcher") def __invert__(self) -> NoReturn: - raise Exception("Cannot invert an AtMostN matcher!") + raise ValueError("Cannot invert an AtMostN matcher!") def __repr__(self) -> str: if self._n == 1: @@ -920,7 +907,7 @@ class AtMostN(Generic[_MatcherT], _BaseWildcardNode): def ZeroOrOne( - matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT + matcher: Union[_MatcherT, DoNotCareSentinel] = DoNotCareSentinel.DEFAULT, ) -> AtMostN[Union[_MatcherT, DoNotCareSentinel]]: """ Used as a convenience wrapper to :class:`AtMostN` when ``n`` is equal to ``1``. @@ -991,7 +978,6 @@ def DoesNotMatch(obj: _OtherNodeT) -> _OtherNodeT: ): # We can use the overridden __invert__ in this case. Pyre doesn't think # we can though, and casting doesn't fix the issue. - # pyre-ignore All three types above have overridden __invert__. inverse = ~obj else: # We must wrap in a _InverseOf. 
@@ -1032,10 +1018,10 @@ def _matches_zero_nodes( matcher: Union[ BaseMatcherNode, _BaseWildcardNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, DoNotCareSentinel, - ] + ], ) -> bool: if isinstance(matcher, AtLeastN) and matcher.n == 0: return True @@ -1062,7 +1048,7 @@ def _sequence_matches( # noqa: C901 Union[ BaseMatcherNode, _BaseWildcardNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, DoNotCareSentinel, ] @@ -1076,6 +1062,7 @@ def _sequence_matches( # noqa: C901 # Base case, we have one or more matcher that wasn't matched if all(_matches_zero_nodes(m) for m in matchers): return _SequenceMatchesResult( + # pyre-ignore[16]: `MatchIfTrue` has no attribute `name`. {m.name: () for m in matchers if isinstance(m, _ExtractMatchingNode)}, (), ) @@ -1111,9 +1098,12 @@ def _sequence_matches( # noqa: C901 metadata_lookup, ) if result.sequence_capture is not None: + matched = result.matched_nodes + assert isinstance(matched, Sequence) return _SequenceMatchesResult( {**attribute_capture, **result.sequence_capture}, - (node, *result.matched_nodes), + # pyre-fixme[6]: Expected `Union[None, Sequence[libcst._n... + (node, *matched), ) # Finally, assume that this does not match the current node. # Consume the matcher but not the node. @@ -1137,9 +1127,12 @@ def _sequence_matches( # noqa: C901 metadata_lookup, ) if result.sequence_capture is not None: + matched = result.matched_nodes + assert isinstance(matched, Sequence) return _SequenceMatchesResult( {**attribute_capture, **result.sequence_capture}, - (node, *result.matched_nodes), + # pyre-fixme[6]: Expected `Union[None, Sequence[libcst._n... 
+ (node, *matched), ) return _SequenceMatchesResult(None, None) else: @@ -1151,9 +1144,12 @@ def _sequence_matches( # noqa: C901 if attribute_capture is not None: result = _sequence_matches(nodes[1:], matchers, metadata_lookup) if result.sequence_capture is not None: + matched = result.matched_nodes + assert isinstance(matched, Sequence) return _SequenceMatchesResult( {**attribute_capture, **result.sequence_capture}, - (node, *result.matched_nodes), + # pyre-fixme[6]: Expected `Union[None, Sequence[libcst._n... + (node, *matched), ) # Now, assume that this does not match the current node. # Consume the matcher but not the node. @@ -1166,7 +1162,7 @@ def _sequence_matches( # noqa: C901 else: # There are no other types of wildcard consumers, but we're making # pyre happy with that fact. - raise Exception(f"Logic error unrecognized wildcard {type(matcher)}!") + raise CSTLogicError(f"Logic error unrecognized wildcard {type(matcher)}!") elif isinstance(matcher, _ExtractMatchingNode): # See if the raw matcher matches. If it does, capture the sequence we matched and store it. result = _sequence_matches( @@ -1246,9 +1242,7 @@ def _attribute_matches( # noqa: C901 if isinstance(node, collections.abc.Sequence): # Given we've generated the types for matchers based on LibCST, we know that # this is true unless the node is badly constructed and types were ignored. 
- node = cast( - Sequence[Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode]], node - ) + node = cast(Sequence[Union[MaybeSentinel, libcst.CSTNode]], node) if isinstance(matcher, OneOf): # We should compare against each of the sequences in the OneOf @@ -1259,7 +1253,8 @@ def _attribute_matches( # noqa: C901 if result.sequence_capture is not None: return result.sequence_capture elif isinstance(m, MatchIfTrue): - return {} if matcher.func(node) else None + # TODO: return captures + return {} if m.func(node) else None elif isinstance(matcher, AllOf): # We should compare against each of the sequences in the AllOf all_captures = {} @@ -1270,8 +1265,6 @@ def _attribute_matches( # noqa: C901 if result.sequence_capture is None: return None all_captures = {**all_captures, **result.sequence_capture} - elif isinstance(m, MatchIfTrue): - return {} if matcher.func(node) else None else: # The value in the AllOf wasn't a sequence, it can't match. return None @@ -1288,7 +1281,7 @@ def _attribute_matches( # noqa: C901 Union[ BaseMatcherNode, _BaseWildcardNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], DoNotCareSentinel, ] ], @@ -1305,7 +1298,8 @@ def _attribute_matches( # noqa: C901 # so the only way it is wrong is if the node was badly constructed and # types were ignored. return _matches( - cast(Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode], node), + cast(Union[MaybeSentinel, libcst.CSTNode], node), + # pyre-fixme[24]: Generic type `MatchIfTrue` expects 1 type parameter. 
cast(Union[BaseMatcherNode, MatchIfTrue, _BaseMetadataMatcher], matcher), metadata_lookup, ) @@ -1364,26 +1358,26 @@ def _metadata_matches( # noqa: C901 return None return {} if actual_value == metadata.value else None else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") def _node_matches( # noqa: C901 node: libcst.CSTNode, matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], _ExtractMatchingNode[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], @@ -1454,19 +1448,19 @@ def _matches( node: Union[MaybeSentinel, libcst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], _ExtractMatchingNode[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], @@ -1497,11 +1491,13 @@ def _matches( return _node_matches(node, matcher, metadata_lookup) -def _construct_metadata_fetcher_null() -> Callable[ - [meta.ProviderT, libcst.CSTNode], object -]: - def _fetch(*args: object, **kwargs: object) -> object: - return _METADATA_MISSING_SENTINEL +def _construct_metadata_fetcher_null() -> ( + Callable[[meta.ProviderT, libcst.CSTNode], object] +): + def _fetch(provider: meta.ProviderT, node: libcst.CSTNode) -> NoReturn: + raise LookupError( + f"{provider.__name__} is not resolved; did you forget a MetadataWrapper?" 
+ ) return _fetch @@ -1524,7 +1520,11 @@ def _construct_metadata_fetcher_wrapper( if provider not in metadata: metadata[provider] = wrapper.resolve(provider) - return metadata.get(provider, {}).get(node, _METADATA_MISSING_SENTINEL) + node_metadata = metadata[provider].get(node, _METADATA_MISSING_SENTINEL) + if isinstance(node_metadata, LazyValue): + node_metadata = node_metadata() + + return node_metadata return _fetch @@ -1605,12 +1605,12 @@ class _FindAllVisitor(libcst.CSTVisitor): self, matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], @@ -1636,7 +1636,7 @@ def _find_or_extract_all( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, # The inverse clause is left off of the public functions `findall` and # `extractall` because we play a dirty trick. We lie to the typechecker @@ -1647,7 +1647,7 @@ def _find_or_extract_all( _InverseOf[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], @@ -1687,9 +1687,7 @@ def _find_or_extract_all( def findall( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], - matcher: Union[ - BaseMatcherNode, MatchIfTrue[Callable[[object], bool]], _BaseMetadataMatcher - ], + matcher: Union[BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher], *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] @@ -1713,7 +1711,7 @@ def findall( or a :class:`OneOf`/:class:`AllOf` special matcher. Unlike :func:`matches`, it can also be a :class:`MatchIfTrue` or :func:`DoesNotMatch` matcher, since we are traversing the tree looking for matches. 
It cannot be a :class:`AtLeastN` or - :class:`AtMostN` matcher because these types are wildcards which can only be usedi + :class:`AtMostN` matcher because these types are wildcards which can only be used inside sequences. """ nodes, _ = _find_or_extract_all(tree, matcher, metadata_resolver=metadata_resolver) @@ -1722,9 +1720,7 @@ def findall( def extractall( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], - matcher: Union[ - BaseMatcherNode, MatchIfTrue[Callable[[object], bool]], _BaseMetadataMatcher - ], + matcher: Union[BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher], *, metadata_resolver: Optional[ Union[libcst.MetadataDependent, libcst.MetadataWrapper] @@ -1764,12 +1760,12 @@ class _ReplaceTransformer(libcst.CSTTransformer): self, matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, _InverseOf[ Union[ BaseMatcherNode, - MatchIfTrue[Callable[[object], bool]], + MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher, ] ], @@ -1799,11 +1795,9 @@ class _ReplaceTransformer(libcst.CSTTransformer): ] if inspect.isfunction(replacement): - # pyre-ignore Pyre knows replacement is a function, but somehow drops - # the type hint from the init signature. self.replacement = replacement elif isinstance(replacement, (MaybeSentinel, RemovalSentinel)): - self.replacement = lambda node, matches: copy.deepcopy(replacement) + self.replacement = lambda node, matches: replacement else: # pyre-ignore We know this is a CSTNode. 
self.replacement = lambda node, matches: replacement.deep_clone() @@ -1866,9 +1860,7 @@ class _ReplaceTransformer(libcst.CSTTransformer): def replace( tree: Union[MaybeSentinel, RemovalSentinel, libcst.CSTNode, meta.MetadataWrapper], - matcher: Union[ - BaseMatcherNode, MatchIfTrue[Callable[[object], bool]], _BaseMetadataMatcher - ], + matcher: Union[BaseMatcherNode, MatchIfTrue[libcst.CSTNode], _BaseMetadataMatcher], replacement: Union[ MaybeSentinel, RemovalSentinel, @@ -1920,7 +1912,7 @@ def replace( """ if isinstance(tree, (RemovalSentinel, MaybeSentinel)): # We can't do any replacements on this, so return the tree exactly. - return copy.deepcopy(tree) + return tree if isinstance(matcher, (AtLeastN, AtMostN)): # We can't match this, since these matchers are forbidden at top level. # These are not subclasses of BaseMatcherNode, but in the case that the @@ -1930,7 +1922,7 @@ def replace( elif isinstance(tree, meta.MetadataWrapper): return tree.module.deep_clone() else: - raise Exception("Logic error!") + raise CSTLogicError("Logic error!") if isinstance(tree, meta.MetadataWrapper) and metadata_resolver is None: # Provide a convenience for calling replace directly on a MetadataWrapper. @@ -1944,4 +1936,8 @@ def replace( fetcher = _construct_metadata_fetcher_dependent(metadata_resolver) replacer = _ReplaceTransformer(matcher, fetcher, replacement) - return tree.visit(replacer) + new_tree = tree.visit(replacer) + if isinstance(new_tree, FlattenSentinel): + # The above transform never returns FlattenSentinel, so this isn't possible + raise CSTLogicError("Logic error, cannot get a FlattenSentinel here!") + return new_tree diff --git a/libcst/matchers/_return_types.py b/libcst/matchers/_return_types.py index d8a22986..2f050088 100644 --- a/libcst/matchers/_return_types.py +++ b/libcst/matchers/_return_types.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -20,6 +20,7 @@ from libcst._nodes.expression import ( BaseExpression, BaseFormattedStringContent, BaseSlice, + BaseTemplatedStringContent, BinaryOperation, BooleanOperation, Call, @@ -66,11 +67,15 @@ from libcst._nodes.expression import ( StarredElement, Subscript, SubscriptElement, + TemplatedString, + TemplatedStringExpression, + TemplatedStringText, Tuple, UnaryOperation, Yield, ) from libcst._nodes.module import Module + from libcst._nodes.op import ( Add, AddAssign, @@ -143,6 +148,7 @@ from libcst._nodes.statement import ( Del, Else, ExceptHandler, + ExceptStarHandler, Expr, Finally, For, @@ -153,14 +159,38 @@ from libcst._nodes.statement import ( ImportAlias, ImportFrom, IndentedBlock, + Match, + MatchAs, + MatchCase, + MatchClass, + MatchKeywordElement, + MatchList, + MatchMapping, + MatchMappingElement, + MatchOr, + MatchOrElement, + MatchPattern, + MatchSequence, + MatchSequenceElement, + MatchSingleton, + MatchStar, + MatchTuple, + MatchValue, NameItem, Nonlocal, + ParamSpec, Pass, Raise, Return, SimpleStatementLine, SimpleStatementSuite, Try, + TryStar, + TypeAlias, + TypeParam, + TypeParameters, + TypeVar, + TypeVarTuple, While, With, WithItem, @@ -197,7 +227,7 @@ TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = { BitAnd: BaseBinaryOp, BitAndAssign: BaseAugOp, BitInvert: BaseUnaryOp, - BitOr: BaseBinaryOp, + BitOr: Union[BaseBinaryOp, MaybeSentinel], BitOrAssign: BaseAugOp, BitXor: BaseBinaryOp, BitXorAssign: BaseAugOp, @@ -228,6 +258,7 @@ TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = { EmptyLine: Union[EmptyLine, RemovalSentinel], Equal: BaseCompOp, ExceptHandler: Union[ExceptHandler, RemovalSentinel], + ExceptStarHandler: Union[ExceptStarHandler, RemovalSentinel], Expr: Union[BaseSmallStatement, RemovalSentinel], Finally: Finally, Float: BaseExpression, @@ -266,6 +297,23 @@ 
TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = { LessThanEqual: BaseCompOp, List: BaseExpression, ListComp: BaseExpression, + Match: Union[BaseStatement, RemovalSentinel], + MatchAs: MatchPattern, + MatchCase: MatchCase, + MatchClass: MatchPattern, + MatchKeywordElement: Union[MatchKeywordElement, RemovalSentinel], + MatchList: MatchPattern, + MatchMapping: MatchPattern, + MatchMappingElement: Union[MatchMappingElement, RemovalSentinel], + MatchOr: MatchPattern, + MatchOrElement: Union[MatchOrElement, RemovalSentinel], + MatchPattern: MatchPattern, + MatchSequence: MatchPattern, + MatchSequenceElement: Union[MatchSequenceElement, RemovalSentinel], + MatchSingleton: MatchPattern, + MatchStar: MatchStar, + MatchTuple: MatchPattern, + MatchValue: MatchPattern, MatrixMultiply: BaseBinaryOp, MatrixMultiplyAssign: BaseAugOp, Minus: BaseUnaryOp, @@ -285,6 +333,7 @@ TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = { Or: BaseBooleanOp, Param: Union[Param, MaybeSentinel, RemovalSentinel], ParamSlash: Union[ParamSlash, MaybeSentinel], + ParamSpec: ParamSpec, ParamStar: Union[ParamStar, MaybeSentinel], Parameters: Parameters, ParenthesizedWhitespace: Union[BaseParenthesizableWhitespace, MaybeSentinel], @@ -308,14 +357,23 @@ TYPED_FUNCTION_RETURN_MAPPING: TypingDict[Type[CSTNode], object] = { SimpleWhitespace: Union[BaseParenthesizableWhitespace, MaybeSentinel], Slice: BaseSlice, StarredDictElement: Union[BaseDictElement, RemovalSentinel], - StarredElement: Union[BaseElement, RemovalSentinel], + StarredElement: BaseExpression, Subscript: BaseExpression, SubscriptElement: Union[SubscriptElement, RemovalSentinel], Subtract: BaseBinaryOp, SubtractAssign: BaseAugOp, + TemplatedString: BaseExpression, + TemplatedStringExpression: Union[BaseTemplatedStringContent, RemovalSentinel], + TemplatedStringText: Union[BaseTemplatedStringContent, RemovalSentinel], TrailingWhitespace: TrailingWhitespace, Try: Union[BaseStatement, RemovalSentinel], + 
TryStar: Union[BaseStatement, RemovalSentinel], Tuple: BaseExpression, + TypeAlias: Union[BaseSmallStatement, RemovalSentinel], + TypeParam: Union[TypeParam, RemovalSentinel], + TypeParameters: TypeParameters, + TypeVar: TypeVar, + TypeVarTuple: TypeVarTuple, UnaryOperation: BaseExpression, While: Union[BaseStatement, RemovalSentinel], With: Union[BaseStatement, RemovalSentinel], diff --git a/libcst/matchers/_visitors.py b/libcst/matchers/_visitors.py index 301e675a..b9252173 100644 --- a/libcst/matchers/_visitors.py +++ b/libcst/matchers/_visitors.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,7 +7,9 @@ from inspect import ismethod, signature from typing import ( Any, Callable, + cast, Dict, + get_type_hints, List, Optional, Sequence, @@ -15,8 +17,6 @@ from typing import ( Tuple, Type, Union, - cast, - get_type_hints, ) import libcst as cst @@ -33,18 +33,26 @@ from libcst.matchers._matcher_base import ( AtLeastN, AtMostN, BaseMatcherNode, - MatchIfTrue, - MatchMetadata, - MatchMetadataIfTrue, - OneOf, extract, extractall, findall, matches, + MatchIfTrue, + MatchMetadata, + MatchMetadataIfTrue, + OneOf, replace, ) from libcst.matchers._return_types import TYPED_FUNCTION_RETURN_MAPPING +try: + # PEP 604 unions, in Python 3.10+ + from types import UnionType +except ImportError: + # We use this for isinstance; no annotation will be an instance of this + class UnionType: + pass + CONCRETE_METHODS: Set[str] = { *{f"visit_{cls.__name__}" for cls in TYPED_FUNCTION_RETURN_MAPPING}, @@ -52,6 +60,11 @@ CONCRETE_METHODS: Set[str] = { } +def is_property(obj: object, attr_name: str) -> bool: + """Check if obj.attr is a property without evaluating it.""" + return isinstance(getattr(type(obj), attr_name, None), property) + + # pyre-ignore We don't care about Any here, its 
not exposed. def _match_decorator_unpickler(kwargs: Any) -> "MatchDecoratorMismatch": return MatchDecoratorMismatch(**kwargs) @@ -79,8 +92,15 @@ def _get_possible_match_classes(matcher: BaseMatcherNode) -> List[Type[cst.CSTNo return [getattr(cst, matcher.__class__.__name__)] +def _annotation_is_union(annotation: object) -> bool: + return ( + isinstance(annotation, UnionType) + or getattr(annotation, "__origin__", None) is Union + ) + + def _get_possible_annotated_classes(annotation: object) -> List[Type[object]]: - if getattr(annotation, "__origin__", None) is Union: + if _annotation_is_union(annotation): return getattr(annotation, "__args__", []) else: return [cast(Type[object], annotation)] @@ -228,7 +248,6 @@ def _check_types( ) if has_invalid_top_level: raise MatchDecoratorMismatch( - # pyre-ignore This anonymous method has a qualname. meth.__qualname__, "The root matcher in a matcher decorator cannot be an " + "AtLeastN, AtMostN or MatchIfTrue matcher", @@ -251,20 +270,22 @@ def _check_types( ) -def _gather_matchers(obj: object) -> Set[BaseMatcherNode]: - visit_matchers: Set[BaseMatcherNode] = set() +def _gather_matchers(obj: object) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]: + """ + Set of gating matchers that we need to track and evaluate. We use these + in conjunction with the call_if_inside and call_if_not_inside decorators + to determine whether to call a visit/leave function. + """ - for func in dir(obj): - try: - for matcher in getattr(getattr(obj, func), VISIT_POSITIVE_MATCHER_ATTR, []): - visit_matchers.add(cast(BaseMatcherNode, matcher)) - for matcher in getattr(getattr(obj, func), VISIT_NEGATIVE_MATCHER_ATTR, []): - visit_matchers.add(cast(BaseMatcherNode, matcher)) - except Exception: - # This could be a caculated property, and calling getattr() evaluates it. - # We have no control over the implementation detail, so if it raises, we - # should not crash. 
- pass + visit_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {} + + for attr_name in dir(obj): + if not is_property(obj, attr_name): + func = getattr(obj, attr_name) + for matcher in getattr(func, VISIT_POSITIVE_MATCHER_ATTR, []): + visit_matchers[cast(BaseMatcherNode, matcher)] = None + for matcher in getattr(func, VISIT_NEGATIVE_MATCHER_ATTR, []): + visit_matchers[cast(BaseMatcherNode, matcher)] = None return visit_matchers @@ -274,7 +295,6 @@ def _assert_not_concrete( ) -> None: if func.__name__ in CONCRETE_METHODS: raise MatchDecoratorMismatch( - # pyre-ignore This anonymous method has a qualname. func.__qualname__, f"@{decorator_name} should not decorate functions that are concrete " + "visit or leave methods.", @@ -289,16 +309,12 @@ def _gather_constructed_visit_funcs( ] = {} for funcname in dir(obj): - try: - possible_func = getattr(obj, funcname) - if not ismethod(possible_func): - continue - func = cast(Callable[[cst.CSTNode], None], possible_func) - except Exception: - # This could be a caculated property, and calling getattr() evaluates it. - # We have no control over the implementation detail, so if it raises, we - # should not crash. + if is_property(obj, funcname): continue + possible_func = getattr(obj, funcname) + if not ismethod(possible_func): + continue + func = cast(Callable[[cst.CSTNode], None], possible_func) matchers = getattr(func, CONSTRUCTED_VISIT_MATCHER_ATTR, []) if matchers: # Make sure that we aren't accidentally putting a @visit on a visit_Node. @@ -324,16 +340,12 @@ def _gather_constructed_leave_funcs( ] = {} for funcname in dir(obj): - try: - possible_func = getattr(obj, funcname) - if not ismethod(possible_func): - continue - func = cast(Callable[[cst.CSTNode], None], possible_func) - except Exception: - # This could be a caculated property, and calling getattr() evaluates it. - # We have no control over the implementation detail, so if it raises, we - # should not crash. 
+ if is_property(obj, funcname): continue + possible_func = getattr(obj, funcname) + if not ismethod(possible_func): + continue + func = cast(Callable[[cst.CSTNode], None], possible_func) matchers = getattr(func, CONSTRUCTED_LEAVE_MATCHER_ATTR, []) if matchers: # Make sure that we aren't accidentally putting a @leave on a leave_Node. @@ -435,12 +447,7 @@ class MatcherDecoratableTransformer(CSTTransformer): def __init__(self) -> None: CSTTransformer.__init__(self) - # List of gating matchers that we need to track and evaluate. We use these - # in conjuction with the call_if_inside and call_if_not_inside decorators - # to determine whether or not to call a visit/leave function. - self._matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = { - m: None for m in _gather_matchers(self) - } + self.__matchers: Optional[Dict[BaseMatcherNode, Optional[cst.CSTNode]]] = None # Mapping of matchers to functions. If in the course of visiting the tree, # a node matches one of these matchers, the corresponding function will be # called as if it was a visit_* method. @@ -473,6 +480,16 @@ class MatcherDecoratableTransformer(CSTTransformer): expected_none_return=False, ) + @property + def _matchers(self) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]: + if self.__matchers is None: + self.__matchers = _gather_matchers(self) + return self.__matchers + + @_matchers.setter + def _matchers(self, value: Dict[BaseMatcherNode, Optional[cst.CSTNode]]) -> None: + self.__matchers = value + def on_visit(self, node: cst.CSTNode) -> bool: # First, evaluate any matchers that we have which we are not inside already. 
self._matchers = _visit_matchers(self._matchers, node, self) @@ -561,7 +578,7 @@ class MatcherDecoratableTransformer(CSTTransformer): tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[..., bool]], + MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], @@ -594,7 +611,7 @@ class MatcherDecoratableTransformer(CSTTransformer): tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[..., bool]], + MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], @@ -613,7 +630,7 @@ class MatcherDecoratableTransformer(CSTTransformer): tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[..., bool]], + MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], @@ -647,12 +664,7 @@ class MatcherDecoratableVisitor(CSTVisitor): def __init__(self) -> None: CSTVisitor.__init__(self) - # List of gating matchers that we need to track and evaluate. We use these - # in conjuction with the call_if_inside and call_if_not_inside decorators - # to determine whether or not to call a visit/leave function. - self._matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = { - m: None for m in _gather_matchers(self) - } + self.__matchers: Optional[Dict[BaseMatcherNode, Optional[cst.CSTNode]]] = None # Mapping of matchers to functions. If in the course of visiting the tree, # a node matches one of these matchers, the corresponding function will be # called as if it was a visit_* method. 
@@ -680,6 +692,16 @@ class MatcherDecoratableVisitor(CSTVisitor): expected_none_return=True, ) + @property + def _matchers(self) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]: + if self.__matchers is None: + self.__matchers = _gather_matchers(self) + return self.__matchers + + @_matchers.setter + def _matchers(self, value: Dict[BaseMatcherNode, Optional[cst.CSTNode]]) -> None: + self.__matchers = value + def on_visit(self, node: cst.CSTNode) -> bool: # First, evaluate any matchers that we have which we are not inside already. self._matchers = _visit_matchers(self._matchers, node, self) @@ -756,7 +778,7 @@ class MatcherDecoratableVisitor(CSTVisitor): tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[..., bool]], + MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], @@ -789,7 +811,7 @@ class MatcherDecoratableVisitor(CSTVisitor): tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[..., bool]], + MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], @@ -808,7 +830,7 @@ class MatcherDecoratableVisitor(CSTVisitor): tree: Union[cst.MaybeSentinel, cst.RemovalSentinel, cst.CSTNode], matcher: Union[ BaseMatcherNode, - MatchIfTrue[Callable[..., bool]], + MatchIfTrue[cst.CSTNode], MatchMetadata, MatchMetadataIfTrue, ], diff --git a/libcst/matchers/tests/__init__.py b/libcst/matchers/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/matchers/tests/__init__.py +++ b/libcst/matchers/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/matchers/tests/test_decorators.py b/libcst/matchers/tests/test_decorators.py index c102f2ab..8b28657c 100644 --- a/libcst/matchers/tests/test_decorators.py +++ b/libcst/matchers/tests/test_decorators.py @@ -1,20 +1,22 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. +import sys from ast import literal_eval from textwrap import dedent from typing import List, Set +from unittest import skipIf import libcst as cst import libcst.matchers as m from libcst.matchers import ( - MatcherDecoratableTransformer, - MatcherDecoratableVisitor, call_if_inside, call_if_not_inside, leave, + MatcherDecoratableTransformer, + MatcherDecoratableVisitor, visit, ) from libcst.testing.utils import UnitTest @@ -993,3 +995,17 @@ class MatchersVisitLeaveDecoratorsTest(UnitTest): # We should have only visited a select number of nodes. self.assertEqual(visitor.visits, ['"baz"']) + + +class MatchersUnionDecoratorsTest(UnitTest): + @skipIf(bool(sys.version_info < (3, 10)), "new union syntax not available") + def test_init_with_new_union_annotation(self) -> None: + class TransformerWithUnionReturnAnnotation(m.MatcherDecoratableTransformer): + @m.leave(m.ImportFrom(module=m.Name(value="typing"))) + def test( + self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom + ) -> cst.ImportFrom | cst.RemovalSentinel: + pass + + # assert that init (specifically _check_types on return annotation) passes + TransformerWithUnionReturnAnnotation() diff --git a/libcst/matchers/tests/test_extract.py b/libcst/matchers/tests/test_extract.py index 77c134a8..50a24c27 100644 --- a/libcst/matchers/tests/test_extract.py +++ b/libcst/matchers/tests/test_extract.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -408,9 +408,11 @@ class MatchersExtractTest(UnitTest): ] ), ) - extracted_seq = cst.ensure_type( - cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call - ).args + extracted_seq = tuple( + cst.ensure_type( + cst.ensure_type(expression, cst.Tuple).elements[1].value, cst.Call + ).args + ) self.assertEqual(nodes, {"args": extracted_seq}) # Verify false behavior diff --git a/libcst/matchers/tests/test_findall.py b/libcst/matchers/tests/test_findall.py index 95233f53..6e81e481 100644 --- a/libcst/matchers/tests/test_findall.py +++ b/libcst/matchers/tests/test_findall.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -103,14 +103,17 @@ class MatchersFindAllTest(UnitTest): ], ) - # Test that failing to provide metadata leads to no match - booleans = findall( - wrapper.module, - m.MatchMetadata( - meta.ExpressionContextProvider, meta.ExpressionContext.STORE - ), - ) - self.assertNodeSequenceEqual(booleans, []) + # Test that failing to provide metadata leads to raising an informative exception + with self.assertRaises( + LookupError, + msg="ExpressionContextProvider is not resolved; did you forget a MetadataWrapper?", + ): + booleans = findall( + wrapper.module, + m.MatchMetadata( + meta.ExpressionContextProvider, meta.ExpressionContext.STORE + ), + ) def test_findall_with_visitors(self) -> None: # Find all assignments in a tree diff --git a/libcst/matchers/tests/test_matchers.py b/libcst/matchers/tests/test_matchers.py index 11d6b5f5..e41bd866 100644 --- a/libcst/matchers/tests/test_matchers.py +++ b/libcst/matchers/tests/test_matchers.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
+# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -291,6 +291,13 @@ class MatchersMatcherTest(UnitTest): self.assertTrue( matches(cst.Name("True"), m.OneOf(m.Name("True"), m.Name("False"))) ) + # Match when one of the option is a TypeOf + self.assertTrue( + matches( + cst.Name("True"), + m.OneOf(m.TypeOf(m.Name, m.NameItem)("True"), m.Name("False")), + ) + ) # Match any assignment that assigns a value of True or False to an # unspecified target. self.assertTrue( diff --git a/libcst/matchers/tests/test_matchers_with_metadata.py b/libcst/matchers/tests/test_matchers_with_metadata.py index a41913d9..63530c37 100644 --- a/libcst/matchers/tests/test_matchers_with_metadata.py +++ b/libcst/matchers/tests/test_matchers_with_metadata.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -366,14 +366,18 @@ class MatchersMetadataTest(UnitTest): ) ) - def test_lambda_metadata_matcher_with_no_metadata(self) -> None: + def test_lambda_metadata_matcher_with_unresolved_metadata(self) -> None: # Match on qualified name provider module = cst.parse_module( "from typing import List\n\ndef foo() -> None: pass\n" ) functiondef = cst.ensure_type(module.body[1], cst.FunctionDef) - self.assertFalse( + # Test that when the metadata is unresolved, raise an informative exception. 
+ with self.assertRaises( + LookupError, + msg="QualifiedNameProvider is not resolved; did you forget a MetadataWrapper?", + ): matches( functiondef, m.FunctionDef( @@ -385,6 +389,24 @@ class MatchersMetadataTest(UnitTest): ) ), ) + + def test_lambda_metadata_matcher_with_no_metadata(self) -> None: + class VoidProvider(meta.BatchableMetadataProvider[object]): + """A dummy metadata provider""" + + module = cst.parse_module( + "from typing import List\n\ndef foo() -> None: pass\n" + ) + wrapper = cst.MetadataWrapper(module) + functiondef = cst.ensure_type(wrapper.module.body[1], cst.FunctionDef) + + # Test that when the node has no corresponding metadata, there is no match. + self.assertFalse( + matches( + functiondef, + m.FunctionDef(name=m.MatchMetadataIfTrue(VoidProvider, lambda _: True)), + metadata_resolver=wrapper, + ) ) def test_lambda_metadata_matcher_operators(self) -> None: diff --git a/libcst/matchers/tests/test_replace.py b/libcst/matchers/tests/test_replace.py index cc922446..db9674f5 100644 --- a/libcst/matchers/tests/test_replace.py +++ b/libcst/matchers/tests/test_replace.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/matchers/tests/test_visitors.py b/libcst/matchers/tests/test_visitors.py index 3fc2c658..2c059921 100644 --- a/libcst/matchers/tests/test_visitors.py +++ b/libcst/matchers/tests/test_visitors.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -9,10 +9,10 @@ from typing import Union import libcst as cst import libcst.matchers as m from libcst.matchers import ( + leave, MatchDecoratorMismatch, MatcherDecoratableTransformer, MatcherDecoratableVisitor, - leave, visit, ) from libcst.testing.utils import UnitTest diff --git a/libcst/metadata/__init__.py b/libcst/metadata/__init__.py index 2e70e9df..ecc42741 100644 --- a/libcst/metadata/__init__.py +++ b/libcst/metadata/__init__.py @@ -1,10 +1,11 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from libcst._position import CodePosition, CodeRange +from libcst.metadata.accessor_provider import AccessorProvider from libcst.metadata.base_provider import ( BaseMetadataProvider, BatchableMetadataProvider, @@ -15,7 +16,12 @@ from libcst.metadata.expression_context_provider import ( ExpressionContext, ExpressionContextProvider, ) +from libcst.metadata.file_path_provider import FilePathProvider from libcst.metadata.full_repo_manager import FullRepoManager +from libcst.metadata.name_provider import ( + FullyQualifiedNameProvider, + QualifiedNameProvider, +) from libcst.metadata.parent_node_provider import ParentNodeProvider from libcst.metadata.position_provider import ( PositionProvider, @@ -32,12 +38,13 @@ from libcst.metadata.scope_provider import ( Assignments, BaseAssignment, BuiltinAssignment, + BuiltinScope, ClassScope, ComprehensionScope, FunctionScope, GlobalScope, + ImportAssignment, QualifiedName, - QualifiedNameProvider, QualifiedNameSource, Scope, ScopeProvider, @@ -46,7 +53,6 @@ from libcst.metadata.span_provider import ByteSpanPositionProvider, CodeSpan from libcst.metadata.type_inference_provider import TypeInferenceProvider from libcst.metadata.wrapper import MetadataWrapper - __all__ = [ "CodePosition", "CodeRange", @@ -60,6 +66,8 @@ __all__ = [ "BaseAssignment", 
"Assignment", "BuiltinAssignment", + "ImportAssignment", + "BuiltinScope", "Access", "Scope", "GlobalScope", @@ -74,11 +82,14 @@ __all__ = [ "BatchableMetadataProvider", "VisitorMetadataProvider", "QualifiedNameProvider", + "FullyQualifiedNameProvider", "ProviderT", "Assignments", "Accesses", "TypeInferenceProvider", "FullRepoManager", + "AccessorProvider", + "FilePathProvider", # Experimental APIs: "ExperimentalReentrantCodegenProvider", "CodegenPartial", diff --git a/libcst/metadata/accessor_provider.py b/libcst/metadata/accessor_provider.py new file mode 100644 index 00000000..5d4f22e4 --- /dev/null +++ b/libcst/metadata/accessor_provider.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + + +import dataclasses + +import libcst as cst + +from libcst.metadata.base_provider import VisitorMetadataProvider + + +class AccessorProvider(VisitorMetadataProvider[str]): + def on_visit(self, node: cst.CSTNode) -> bool: + for f in dataclasses.fields(node): + child = getattr(node, f.name) + self.set_metadata(child, f.name) + return True diff --git a/libcst/metadata/base_provider.py b/libcst/metadata/base_provider.py index b2910f5c..5d93fbe8 100644 --- a/libcst/metadata/base_provider.py +++ b/libcst/metadata/base_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,30 +6,30 @@ from pathlib import Path from types import MappingProxyType from typing import ( - TYPE_CHECKING, - Callable, Generic, List, Mapping, MutableMapping, Optional, + Protocol, Type, + TYPE_CHECKING, TypeVar, - cast, + Union, ) from libcst._batched_visitor import BatchableCSTVisitor from libcst._metadata_dependent import ( _T as _MetadataT, _UNDEFINED_DEFAULT, + LazyValue, MetadataDependent, ) from libcst._visitors import CSTVisitor - if TYPE_CHECKING: from libcst._nodes.base import CSTNode - from libcst._nodes.module import Module, _ModuleSelfT as _ModuleT + from libcst._nodes.module import _ModuleSelfT as _ModuleT, Module from libcst.metadata.wrapper import MetadataWrapper @@ -37,6 +37,18 @@ ProviderT = Type["BaseMetadataProvider[object]"] # BaseMetadataProvider[int] would be a subtype of BaseMetadataProvider[object], so the # typevar is covariant. _ProvidedMetadataT = TypeVar("_ProvidedMetadataT", covariant=True) +MaybeLazyMetadataT = Union[LazyValue[_ProvidedMetadataT], _ProvidedMetadataT] + + +class GenCacheMethod(Protocol): + def __call__( + self, + root_path: Path, + paths: List[str], + *, + timeout: Optional[int] = None, + use_pyproject_toml: bool = False, + ) -> Mapping[str, object]: ... # We can't use an ABCMeta here, because of metaclass conflicts @@ -53,26 +65,26 @@ class BaseMetadataProvider(MetadataDependent, Generic[_ProvidedMetadataT]): # # N.B. This has some typing variance problems. See `set_metadata` for an # explanation. - _computed: MutableMapping["CSTNode", _ProvidedMetadataT] + _computed: MutableMapping["CSTNode", MaybeLazyMetadataT] - #: Implement gen_cache to indicate the matadata provider depends on cache from external + #: Implement gen_cache to indicate the metadata provider depends on cache from external #: system. This function will be called by :class:`~libcst.metadata.FullRepoManager` #: to compute required cache object per file path. 
- gen_cache: Optional[Callable[[Path, List[str], int], Mapping[str, object]]] = None + gen_cache: Optional[GenCacheMethod] = None def __init__(self, cache: object = None) -> None: super().__init__() - self._computed = {} + self._computed: MutableMapping["CSTNode", MaybeLazyMetadataT] = {} if self.gen_cache and cache is None: # The metadata provider implementation is responsible to store and use cache. - raise Exception( + raise ValueError( f"Cache is required for initializing {self.__class__.__name__}." ) self.cache = cache def _gen( self, wrapper: "MetadataWrapper" - ) -> Mapping["CSTNode", _ProvidedMetadataT]: + ) -> Mapping["CSTNode", MaybeLazyMetadataT]: """ Resolves and returns metadata mapping for the module in ``wrapper``. @@ -94,11 +106,7 @@ class BaseMetadataProvider(MetadataDependent, Generic[_ProvidedMetadataT]): """ ... - # pyre-ignore[46]: The covariant `value` isn't type-safe because we write it to - # pyre: `self._computed`, however we assume that only one subclass in the MRO chain - # pyre: will ever call `set_metadata`, so it's okay for our purposes. There's no - # pyre: sane way to redesign this API so that it doesn't have this problem. - def set_metadata(self, node: "CSTNode", value: _ProvidedMetadataT) -> None: + def set_metadata(self, node: "CSTNode", value: MaybeLazyMetadataT) -> None: """ Record a metadata value ``value`` for ``node``. 
""" @@ -108,7 +116,9 @@ class BaseMetadataProvider(MetadataDependent, Generic[_ProvidedMetadataT]): self, key: Type["BaseMetadataProvider[_MetadataT]"], node: "CSTNode", - default: _MetadataT = _UNDEFINED_DEFAULT, + default: Union[ + MaybeLazyMetadataT, Type[_UNDEFINED_DEFAULT] + ] = _UNDEFINED_DEFAULT, ) -> _MetadataT: """ The same method as :func:`~libcst.MetadataDependent.get_metadata` except @@ -117,9 +127,12 @@ class BaseMetadataProvider(MetadataDependent, Generic[_ProvidedMetadataT]): """ if key is type(self): if default is not _UNDEFINED_DEFAULT: - return cast(_MetadataT, self._computed.get(node, default)) + ret = self._computed.get(node, default) else: - return cast(_MetadataT, self._computed[node]) + ret = self._computed[node] + if isinstance(ret, LazyValue): + return ret() + return ret return super().get_metadata(key, node, default) diff --git a/libcst/metadata/expression_context_provider.py b/libcst/metadata/expression_context_provider.py index b06ba113..955c14ad 100644 --- a/libcst/metadata/expression_context_provider.py +++ b/libcst/metadata/expression_context_provider.py @@ -1,10 +1,10 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-from enum import Enum, auto +from enum import auto, Enum from typing import Optional, Sequence import libcst as cst @@ -84,6 +84,13 @@ class ExpressionContextVisitor(cst.CSTVisitor): node.value.visit(self) return False + def visit_NamedExpr(self, node: cst.NamedExpr) -> bool: + node.target.visit( + ExpressionContextVisitor(self.provider, ExpressionContext.STORE) + ) + node.value.visit(self) + return False + def visit_Name(self, node: cst.Name) -> bool: self.provider.set_metadata(node, self.context) return False @@ -194,7 +201,7 @@ class ExpressionContextVisitor(cst.CSTVisitor): return False -class ExpressionContextProvider(BatchableMetadataProvider[Optional[ExpressionContext]]): +class ExpressionContextProvider(BatchableMetadataProvider[ExpressionContext]): """ Provides :class:`ExpressionContext` metadata (mimics the `expr_context `__ in ast) for the @@ -202,9 +209,9 @@ class ExpressionContextProvider(BatchableMetadataProvider[Optional[ExpressionCon :class:`~libcst.Attribute`, :class:`~libcst.Subscript`, :class:`~libcst.StarredElement` , :class:`~libcst.List`, :class:`~libcst.Tuple` and :class:`~libcst.Name`. - Not that a :class:`~libcst.Name` may not always has context because of the differences between + Note that a :class:`~libcst.Name` may not always have context because of the differences between ast and LibCST. E.g. :attr:`~libcst.Attribute.attr` is a :class:`~libcst.Name` in LibCST - but a str in ast. To honor ast implementation, we don't assignment context to + but a str in ast. To honor ast implementation, we don't assign context to :attr:`~libcst.Attribute.attr`. diff --git a/libcst/metadata/file_path_provider.py b/libcst/metadata/file_path_provider.py new file mode 100644 index 00000000..6ab01b5f --- /dev/null +++ b/libcst/metadata/file_path_provider.py @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from pathlib import Path +from typing import Any, List, Mapping, Optional + +import libcst as cst +from libcst.metadata.base_provider import BatchableMetadataProvider + + +class FilePathProvider(BatchableMetadataProvider[Path]): + """ + Provides the path to the current file on disk as metadata for the root + :class:`~libcst.Module` node. Requires a :class:`~libcst.metadata.FullRepoManager`. + The returned path will always be resolved to an absolute path using + :func:`pathlib.Path.resolve`. + + Example usage: + + .. code:: python + + class CustomVisitor(CSTVisitor): + METADATA_DEPENDENCIES = [FilePathProvider] + + path: pathlib.Path + + def visit_Module(self, node: libcst.Module) -> None: + self.path = self.get_metadata(FilePathProvider, node) + + .. code:: + + >>> mgr = FullRepoManager(".", {"libcst/_types.py"}, {FilePathProvider}) + >>> wrapper = mgr.get_metadata_wrapper_for_path("libcst/_types.py") + >>> fqnames = wrapper.resolve(FilePathProvider) + >>> {type(k): v for k, v in wrapper.resolve(FilePathProvider).items()} + {: PosixPath('/home/user/libcst/_types.py')} + + """ + + @classmethod + def gen_cache( + cls, root_path: Path, paths: List[str], **kwargs: Any + ) -> Mapping[str, Path]: + cache = {path: (root_path / path).resolve() for path in paths} + return cache + + def __init__(self, cache: Path) -> None: + super().__init__(cache) + self.path: Path = cache + + def visit_Module(self, node: cst.Module) -> Optional[bool]: + self.set_metadata(node, self.path) + return False diff --git a/libcst/metadata/full_repo_manager.py b/libcst/metadata/full_repo_manager.py index 4d41140a..ab6430d8 100644 --- a/libcst/metadata/full_repo_manager.py +++ b/libcst/metadata/full_repo_manager.py @@ -1,16 +1,16 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from pathlib import Path -from typing import TYPE_CHECKING, Collection, Dict, List, Mapping +from typing import Collection, Dict, List, Mapping, TYPE_CHECKING import libcst as cst +from libcst._types import StrPath from libcst.metadata.wrapper import MetadataWrapper - if TYPE_CHECKING: from libcst.metadata.base_provider import ProviderT # noqa: F401 @@ -18,10 +18,11 @@ if TYPE_CHECKING: class FullRepoManager: def __init__( self, - repo_root_dir: str, + repo_root_dir: StrPath, paths: Collection[str], providers: Collection["ProviderT"], timeout: int = 5, + use_pyproject_toml: bool = False, ) -> None: """ Given project root directory with pyre and watchman setup, :class:`~libcst.metadata.FullRepoManager` @@ -29,14 +30,16 @@ class FullRepoManager: metadata provider like :class:`~libcst.metadata.TypeInferenceProvider`. :param paths: a collection of paths to access full repository data. - :param providers: a collection of metadata provider classes require accessing full repository - data, currently supports :class:`~libcst.metadata.TypeInferenceProvider`. + :param providers: a collection of metadata provider classes require accessing full repository data, currently supports + :class:`~libcst.metadata.TypeInferenceProvider` and + :class:`~libcst.metadata.FullyQualifiedNameProvider`. :param timeout: number of seconds. Raises `TimeoutExpired `_ when timeout. 
""" self.root_path: Path = Path(repo_root_dir) self._cache: Dict["ProviderT", Mapping[str, object]] = {} self._timeout = timeout + self._use_pyproject_toml = use_pyproject_toml self._providers = providers self._paths: List[str] = list(paths) @@ -64,7 +67,10 @@ class FullRepoManager: handler = provider.gen_cache if handler: cache[provider] = handler( - self.root_path, self._paths, self._timeout + self.root_path, + self._paths, + timeout=self._timeout, + use_pyproject_toml=self._use_pyproject_toml, ) self._cache = cache @@ -79,7 +85,7 @@ class FullRepoManager: MetadataWrapper(module, cache=manager.get_cache_for_path("a.py")) """ if path not in self._paths: - raise Exception( + raise ValueError( "The path needs to be in paths parameter when constructing FullRepoManager for efficient batch processing." ) # Make sure that the cache is available to us. If the user called diff --git a/libcst/metadata/name_provider.py b/libcst/metadata/name_provider.py new file mode 100644 index 00000000..7de76eb5 --- /dev/null +++ b/libcst/metadata/name_provider.py @@ -0,0 +1,197 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import dataclasses +from pathlib import Path +from typing import Any, Collection, List, Mapping, Optional, Union + +import libcst as cst +from libcst._metadata_dependent import LazyValue, MetadataDependent +from libcst.helpers.module import calculate_module_and_package, ModuleNameAndPackage +from libcst.metadata.base_provider import BatchableMetadataProvider +from libcst.metadata.scope_provider import ( + QualifiedName, + QualifiedNameSource, + ScopeProvider, +) + + +class QualifiedNameProvider(BatchableMetadataProvider[Collection[QualifiedName]]): + """ + Compute possible qualified names of a variable CSTNode + (extends `PEP-3155 `_). 
+ It uses the + :func:`~libcst.metadata.Scope.get_qualified_names_for` underlying to get qualified names. + Multiple qualified names may be returned, such as when we have conditional imports or an + import shadows another. E.g., the provider finds ``a.b``, ``d.e`` and + ``f.g`` as possible qualified names of ``c``:: + + >>> wrapper = MetadataWrapper( + >>> cst.parse_module(dedent( + >>> ''' + >>> if something: + >>> from a import b as c + >>> elif otherthing: + >>> from d import e as c + >>> else: + >>> from f import g as c + >>> c() + >>> ''' + >>> )) + >>> ) + >>> call = wrapper.module.body[1].body[0].value + >>> wrapper.resolve(QualifiedNameProvider)[call], + { + QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), + QualifiedName(name="d.e", source=QualifiedNameSource.IMPORT), + QualifiedName(name="f.g", source=QualifiedNameSource.IMPORT), + } + + For qualified name of a variable in a function or a comprehension, please refer + :func:`~libcst.metadata.Scope.get_qualified_names_for` for more detail. 
+ """ + + METADATA_DEPENDENCIES = (ScopeProvider,) + + def visit_Module(self, node: cst.Module) -> Optional[bool]: + visitor = QualifiedNameVisitor(self) + node.visit(visitor) + + @staticmethod + def has_name( + visitor: MetadataDependent, node: cst.CSTNode, name: Union[str, QualifiedName] + ) -> bool: + """Check if any of qualified name has the str name or :class:`~libcst.metadata.QualifiedName` name.""" + qualified_names = visitor.get_metadata(QualifiedNameProvider, node, set()) + if isinstance(name, str): + return any(qn.name == name for qn in qualified_names) + else: + return any(qn == name for qn in qualified_names) + + +class QualifiedNameVisitor(cst.CSTVisitor): + def __init__(self, provider: "QualifiedNameProvider") -> None: + self.provider: QualifiedNameProvider = provider + + def on_visit(self, node: cst.CSTNode) -> bool: + scope = self.provider.get_metadata(ScopeProvider, node, None) + if scope: + self.provider.set_metadata( + node, LazyValue(lambda: scope.get_qualified_names_for(node)) + ) + else: + self.provider.set_metadata(node, set()) + super().on_visit(node) + return True + + +class FullyQualifiedNameProvider(BatchableMetadataProvider[Collection[QualifiedName]]): + """ + Provide fully qualified names for CST nodes. Like :class:`QualifiedNameProvider`, + but the provided :class:`QualifiedName` instances have absolute identifier names + instead of local to the current module. + + This provider is initialized with the current module's fully qualified name, and can + be used with :class:`~libcst.metadata.FullRepoManager`. The module's fully qualified + name itself is stored as a metadata of the :class:`~libcst.Module` node. Compared to + :class:`QualifiedNameProvider`, it also resolves relative imports. 
+ + Example usage:: + + >>> mgr = FullRepoManager(".", {"dir/a.py"}, {FullyQualifiedNameProvider}) + >>> wrapper = mgr.get_metadata_wrapper_for_path("dir/a.py") + >>> fqnames = wrapper.resolve(FullyQualifiedNameProvider) + >>> {type(k): v for (k, v) in fqnames.items()} + {: {QualifiedName(name='dir.a', source=)}} + + """ + + METADATA_DEPENDENCIES = (QualifiedNameProvider,) + + @classmethod + def gen_cache( + cls, + root_path: Path, + paths: List[str], + *, + use_pyproject_toml: bool = False, + **kwargs: Any, + ) -> Mapping[str, ModuleNameAndPackage]: + cache = { + path: calculate_module_and_package( + root_path, path, use_pyproject_toml=use_pyproject_toml + ) + for path in paths + } + return cache + + def __init__(self, cache: ModuleNameAndPackage) -> None: + super().__init__(cache) + self.module_name: str = cache.name + self.package_name: str = cache.package + + def visit_Module(self, node: cst.Module) -> bool: + visitor = FullyQualifiedNameVisitor(self, self.module_name, self.package_name) + node.visit(visitor) + self.set_metadata( + node, + {QualifiedName(name=self.module_name, source=QualifiedNameSource.LOCAL)}, + ) + return True + + +class FullyQualifiedNameVisitor(cst.CSTVisitor): + @staticmethod + def _fully_qualify_local(module_name: str, package_name: str, name: str) -> str: + abs_name = name.lstrip(".") + num_dots = len(name) - len(abs_name) + # handle relative import + if num_dots > 0: + name = abs_name + # see importlib._bootstrap._resolve_name + # https://github.com/python/cpython/blob/3.10/Lib/importlib/_bootstrap.py#L902 + bits = package_name.rsplit(".", num_dots - 1) + if len(bits) < num_dots: + raise ImportError("attempted relative import beyond top-level package") + module_name = bits[0] + + return f"{module_name}.{name}" + + @staticmethod + def _fully_qualify( + module_name: str, package_name: str, qname: QualifiedName + ) -> QualifiedName: + if qname.source == QualifiedNameSource.BUILTIN: + # builtins are already fully qualified + return qname + 
name = qname.name + if qname.source == QualifiedNameSource.IMPORT and not name.startswith("."): + # non-relative imports are already fully qualified + return qname + new_name = FullyQualifiedNameVisitor._fully_qualify_local( + module_name, package_name, qname.name + ) + return dataclasses.replace(qname, name=new_name) + + def __init__( + self, provider: FullyQualifiedNameProvider, module_name: str, package_name: str + ) -> None: + self.module_name = module_name + self.package_name = package_name + self.provider = provider + + def on_visit(self, node: cst.CSTNode) -> bool: + qnames = self.provider.get_metadata(QualifiedNameProvider, node) + if qnames is not None: + self.provider.set_metadata( + node, + { + FullyQualifiedNameVisitor._fully_qualify( + self.module_name, self.package_name, qname + ) + for qname in qnames + }, + ) + return True diff --git a/libcst/metadata/parent_node_provider.py b/libcst/metadata/parent_node_provider.py index 1e569d51..901891b9 100644 --- a/libcst/metadata/parent_node_provider.py +++ b/libcst/metadata/parent_node_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/metadata/position_provider.py b/libcst/metadata/position_provider.py index 7bd1e713..bbc9bb72 100644 --- a/libcst/metadata/position_provider.py +++ b/libcst/metadata/position_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -16,7 +16,6 @@ from libcst._nodes.module import Module from libcst._position import CodePosition, CodeRange from libcst.metadata.base_provider import BaseMetadataProvider - NEWLINE_RE: Pattern[str] = re.compile(r"\r\n?|\n") diff --git a/libcst/metadata/reentrant_codegen.py b/libcst/metadata/reentrant_codegen.py index 89e11c93..899d2f1f 100644 --- a/libcst/metadata/reentrant_codegen.py +++ b/libcst/metadata/reentrant_codegen.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/metadata/scope_provider.py b/libcst/metadata/scope_provider.py index 043f87e8..909a55b2 100644 --- a/libcst/metadata/scope_provider.py +++ b/libcst/metadata/scope_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -7,9 +7,9 @@ import abc import builtins from collections import defaultdict -from contextlib import contextmanager +from contextlib import contextmanager, ExitStack from dataclasses import dataclass -from enum import Enum, auto +from enum import auto, Enum from typing import ( Collection, Dict, @@ -27,7 +27,6 @@ from typing import ( import libcst as cst from libcst import ensure_type from libcst._add_slots import add_slots -from libcst._metadata_dependent import MetadataDependent from libcst.helpers import get_full_name_for_node from libcst.metadata.base_provider import BatchableMetadataProvider from libcst.metadata.expression_context_provider import ( @@ -35,7 +34,8 @@ from libcst.metadata.expression_context_provider import ( ExpressionContextProvider, ) - +# Comprehensions are handled separately in _visit_comp_alike due to +# the complexity of the semantics _ASSIGNMENT_LIKE_NODES = ( cst.AnnAssign, cst.AsName, @@ -43,7 +43,6 @@ _ASSIGNMENT_LIKE_NODES = ( cst.AugAssign, cst.ClassDef, cst.CompFor, - cst.For, cst.FunctionDef, cst.Global, cst.Import, @@ -52,6 +51,10 @@ _ASSIGNMENT_LIKE_NODES = ( cst.Nonlocal, cst.Parameters, cst.WithItem, + cst.TypeVar, + cst.TypeAlias, + cst.TypeVarTuple, + cst.ParamSpec, ) @@ -74,9 +77,10 @@ class Access: #: The node of the access. A name is an access when the expression context is #: :attr:`ExpressionContext.LOAD`. This is usually the name node representing the - #: access, except for dotted imports, when it might be the attribute that - #: represents the most specific part of the imported symbol. - node: Union[cst.Name, cst.Attribute] + #: access, except for: 1) dotted imports, when it might be the attribute that + #: represents the most specific part of the imported symbol; and 2) string + #: annotations, when it is the entire string literal + node: Union[cst.Name, cst.Attribute, cst.BaseString] #: The scope of the access. Note that a access could be in a child scope of its #: assignment. 
@@ -116,18 +120,37 @@ class Access: self.__assignments.add(assignment) def record_assignments(self, name: str) -> None: - assignments = self.scope[name] + assignments = self.scope._resolve_scope_for_access(name, self.scope) # filter out assignments that happened later than this access previous_assignments = { assignment for assignment in assignments if assignment.scope != self.scope or assignment._index < self.__index } - if not previous_assignments and assignments: - previous_assignments = self.scope.parent[name] + if not previous_assignments and assignments and self.scope.parent != self.scope: + previous_assignments = self.scope.parent._resolve_scope_for_access( + name, self.scope + ) self.__assignments |= previous_assignments +class QualifiedNameSource(Enum): + IMPORT = auto() + BUILTIN = auto() + LOCAL = auto() + + +@add_slots +@dataclass(frozen=True) +class QualifiedName: + #: Qualified name, e.g. ``a.b.c`` or ``fn..var``. + name: str + + #: Source of the name, either :attr:`QualifiedNameSource.IMPORT`, :attr:`QualifiedNameSource.BUILTIN` + #: or :attr:`QualifiedNameSource.LOCAL`. + source: QualifiedNameSource + + class BaseAssignment(abc.ABC): """Abstract base class of :class:`Assignment` and :class:`BuitinAssignment`.""" @@ -175,6 +198,9 @@ class BaseAssignment(abc.ABC): """Return an integer that represents the order of assignments in `scope`""" return -1 + @abc.abstractmethod + def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: ... + class Assignment(BaseAssignment): """An assignment records the name, CSTNode and its accesses.""" @@ -195,6 +221,18 @@ class Assignment(BaseAssignment): def _index(self) -> int: return self.__index + def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: + return { + QualifiedName( + ( + f"{self.scope._name_prefix}.{full_name}" + if self.scope._name_prefix + else full_name + ), + QualifiedNameSource.LOCAL, + ) + } + # even though we don't override the constructor. 
class BuiltinAssignment(BaseAssignment): @@ -205,7 +243,80 @@ class BuiltinAssignment(BaseAssignment): `types `_. """ - pass + def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: + return {QualifiedName(f"builtins.{self.name}", QualifiedNameSource.BUILTIN)} + + +class ImportAssignment(Assignment): + """An assignment records the import node and it's alias""" + + as_name: cst.CSTNode + + def __init__( + self, + name: str, + scope: "Scope", + node: cst.CSTNode, + index: int, + as_name: cst.CSTNode, + ) -> None: + super().__init__(name, scope, node, index) + self.as_name = as_name + + def get_module_name_for_import(self) -> str: + module = "" + if isinstance(self.node, cst.ImportFrom): + module_attr = self.node.module + relative = self.node.relative + if module_attr: + module = get_full_name_for_node(module_attr) or "" + if relative: + module = "." * len(relative) + module + return module + + def get_qualified_names_for(self, full_name: str) -> Set[QualifiedName]: + module = self.get_module_name_for_import() + results = set() + assert isinstance(self.node, (cst.ImportFrom, cst.Import)) + import_names = self.node.names + if not isinstance(import_names, cst.ImportStar): + for name in import_names: + real_name = get_full_name_for_node(name.name) + if not real_name: + continue + # real_name can contain `.` for dotted imports + # for these we want to find the longest prefix that matches full_name + parts = real_name.split(".") + real_names = [".".join(parts[:i]) for i in range(len(parts), 0, -1)] + for real_name in real_names: + as_name = real_name + if module and module.endswith("."): + # from . 
import a + # real_name should be ".a" + real_name = f"{module}{real_name}" + elif module: + real_name = f"{module}.{real_name}" + if name and name.asname: + eval_alias = name.evaluated_alias + if eval_alias is not None: + as_name = eval_alias + if full_name.startswith(as_name): + remaining_name = full_name.split(as_name, 1)[1] + if remaining_name and not remaining_name.startswith("."): + continue + remaining_name = remaining_name.lstrip(".") + results.add( + QualifiedName( + ( + f"{real_name}.{remaining_name}" + if remaining_name + else real_name + ), + QualifiedNameSource.IMPORT, + ) + ) + break + return results class Assignments: @@ -222,7 +333,7 @@ class Assignments: def __getitem__(self, node: Union[str, cst.CSTNode]) -> Collection[BaseAssignment]: """Get assignments given a name str or :class:`~libcst.CSTNode` by ``scope.assignments[node]``""" - name = _NameUtil.get_name_for(node) + name = get_full_name_for_node(node) return set(self._assignments[name]) if name in self._assignments else set() def __contains__(self, node: Union[str, cst.CSTNode]) -> bool: @@ -244,7 +355,7 @@ class Accesses: def __getitem__(self, node: Union[str, cst.CSTNode]) -> Collection[Access]: """Get accesses given a name str or :class:`~libcst.CSTNode` by ``scope.accesses[node]``""" - name = _NameUtil.get_name_for(node) + name = get_full_name_for_node(node) return self._accesses[name] if name in self._accesses else set() def __contains__(self, node: Union[str, cst.CSTNode]) -> bool: @@ -252,106 +363,6 @@ class Accesses: return len(self[node]) > 0 -class QualifiedNameSource(Enum): - IMPORT = auto() - BUILTIN = auto() - LOCAL = auto() - - -@add_slots -@dataclass(frozen=True) -class QualifiedName: - #: Qualified name, e.g. ``a.b.c`` or ``fn..var``. - name: str - - #: Source of the name, either :attr:`QualifiedNameSource.IMPORT`, :attr:`QualifiedNameSource.BUILTIN` - #: or :attr:`QualifiedNameSource.LOCAL`. 
- source: QualifiedNameSource - - -class _NameUtil: - @staticmethod - def get_name_for(node: Union[str, cst.CSTNode]) -> Optional[str]: - """A helper function to retrieve simple name str from a CSTNode or str""" - if isinstance(node, cst.Name): - return node.value - elif isinstance(node, str): - return node - elif isinstance(node, cst.Call): - return _NameUtil.get_name_for(node.func) - elif isinstance(node, cst.Subscript): - return _NameUtil.get_name_for(node.value) - elif isinstance(node, (cst.FunctionDef, cst.ClassDef)): - return _NameUtil.get_name_for(node.name) - return None - - @staticmethod - def find_qualified_name_for_import_alike( - assignment_node: Union[cst.Import, cst.ImportFrom], full_name: str - ) -> Set[QualifiedName]: - module = "" - results = set() - if isinstance(assignment_node, cst.ImportFrom): - module_attr = assignment_node.module - if module_attr: - # TODO: for relative import, keep the relative Dot in the qualified name - module = get_full_name_for_node(module_attr) - import_names = assignment_node.names - if not isinstance(import_names, cst.ImportStar): - for name in import_names: - real_name = get_full_name_for_node(name.name) - if not real_name: - continue - # real_name can contain `.` for dotted imports - # for these we want to find the longest prefix that matches full_name - parts = real_name.split(".") - real_names = [".".join(parts[:i]) for i in range(len(parts), 0, -1)] - for real_name in real_names: - as_name = real_name - if module: - real_name = f"{module}.{real_name}" - if name and name.asname: - eval_alias = name.evaluated_alias - if eval_alias is not None: - as_name = eval_alias - if full_name.startswith(as_name): - remaining_name = full_name.split(as_name, 1)[1].lstrip(".") - results.add( - QualifiedName( - f"{real_name}.{remaining_name}" - if remaining_name - else real_name, - QualifiedNameSource.IMPORT, - ) - ) - break - return results - - @staticmethod - def find_qualified_name_for_non_import( - assignment: Assignment, 
remaining_name: str - ) -> Set[QualifiedName]: - scope = assignment.scope - name_prefixes = [] - while scope: - if isinstance(scope, ClassScope): - name_prefixes.append(scope.name) - elif isinstance(scope, FunctionScope): - name_prefixes.append(f"{scope.name}.") - elif isinstance(scope, GlobalScope): - break - elif isinstance(scope, ComprehensionScope): - name_prefixes.append("") - else: - raise Exception(f"Unexpected Scope: {scope}") - scope = scope.parent - - parts = [*reversed(name_prefixes)] - if remaining_name: - parts.append(remaining_name) - return {QualifiedName(".".join(parts), QualifiedNameSource.LOCAL)} - - class Scope(abc.ABC): """ Base class of all scope classes. Scope object stores assignments from imports, @@ -377,43 +388,82 @@ class Scope(abc.ABC): #: Refers to the GlobalScope. globals: "GlobalScope" _assignments: MutableMapping[str, Set[BaseAssignment]] - _accesses: MutableMapping[str, Set[Access]] _assignment_count: int + _accesses_by_name: MutableMapping[str, Set[Access]] + _accesses_by_node: MutableMapping[cst.CSTNode, Set[Access]] + _name_prefix: str def __init__(self, parent: "Scope") -> None: super().__init__() self.parent = parent self.globals = parent.globals self._assignments = defaultdict(set) - self._accesses = defaultdict(set) self._assignment_count = 0 + self._accesses_by_name = defaultdict(set) + self._accesses_by_node = defaultdict(set) + self._name_prefix = "" def record_assignment(self, name: str, node: cst.CSTNode) -> None: - self._assignments[name].add( - Assignment(name=name, scope=self, node=node, index=self._assignment_count) + target = self._find_assignment_target(name) + target._assignments[name].add( + Assignment( + name=name, scope=target, node=node, index=target._assignment_count + ) ) + def record_import_assignment( + self, name: str, node: cst.CSTNode, as_name: cst.CSTNode + ) -> None: + target = self._find_assignment_target(name) + target._assignments[name].add( + ImportAssignment( + name=name, + scope=target, + 
node=node, + as_name=as_name, + index=target._assignment_count, + ) + ) + + def _find_assignment_target(self, name: str) -> "Scope": + return self + def record_access(self, name: str, access: Access) -> None: - self._accesses[name].add(access) + self._accesses_by_name[name].add(access) + self._accesses_by_node[access.node].add(access) - def _getitem_from_self_or_parent(self, name: str) -> Set[BaseAssignment]: - """Overridden by ClassScope to hide it's assignments from child scopes.""" - return self[name] + def _is_visible_from_children(self, from_scope: "Scope") -> bool: + """Returns if the assignments in this scope can be accessed from children. - def _contains_in_self_or_parent(self, name: str) -> bool: - """Overridden by ClassScope to hide it's assignments from child scopes.""" - return name in self + This is normally True, except for class scopes:: - def _record_assignment_as_parent(self, name: str, node: cst.CSTNode) -> None: - """Overridden by ClassScope to forward 'nonlocal' assignments from child scopes.""" - self.record_assignment(name, node) + def outer_fn(): + v = ... # outer_fn's declaration + class InnerCls: + v = ... # shadows outer_fn's declaration + class InnerInnerCls: + v = ... # shadows all previous declarations of v + def inner_fn(): + nonlocal v + v = ... # this refers to outer_fn's declaration + # and not to any of the inner classes' as those are + # hidden from their children. + """ + return True + + def _next_visible_parent( + self, from_scope: "Scope", first: Optional["Scope"] = None + ) -> "Scope": + parent = first if first is not None else self.parent + while not parent._is_visible_from_children(from_scope): + parent = parent.parent + return parent @abc.abstractmethod def __contains__(self, name: str) -> bool: - """ Check if the name str exist in current scope by ``name in scope``. """ + """Check if the name str exist in current scope by ``name in scope``.""" ... 
- @abc.abstractmethod def __getitem__(self, name: str) -> Set[BaseAssignment]: """ Get assignments given a name str by ``scope[name]``. @@ -451,18 +501,21 @@ class Scope(abc.ABC): defined a given name by the time a piece of code is executed. For the above example, value would resolve to a set of both assignments. """ - ... + return self._resolve_scope_for_access(name, self) + + @abc.abstractmethod + def _resolve_scope_for_access( + self, name: str, from_scope: "Scope" + ) -> Set[BaseAssignment]: ... def __hash__(self) -> int: return id(self) @abc.abstractmethod - def record_global_overwrite(self, name: str) -> None: - ... + def record_global_overwrite(self, name: str) -> None: ... @abc.abstractmethod - def record_nonlocal_overwrite(self, name: str) -> None: - ... + def record_nonlocal_overwrite(self, name: str) -> None: ... def get_qualified_names_for( self, node: Union[str, cst.CSTNode] @@ -495,34 +548,41 @@ class Scope(abc.ABC): considering it could be a complex type annotation in the string which is hard to resolve, e.g. ``List[Union[int, str]]``. 
""" - results = set() + # if this node is an access we know the assignment and we can use that name + node_accesses = ( + self._accesses_by_node.get(node) if isinstance(node, cst.CSTNode) else None + ) + if node_accesses: + return { + qname + for access in node_accesses + for referent in access.referents + for qname in referent.get_qualified_names_for(referent.name) + } + full_name = get_full_name_for_node(node) if full_name is None: - return results + return set() + assignments = set() - parts = full_name.split(".") - for i in range(len(parts), 0, -1): - prefix = ".".join(parts[:i]) + prefix = full_name + while prefix: if prefix in self: assignments = self[prefix] break + idx = prefix.rfind(".") + prefix = None if idx == -1 else prefix[:idx] + + if not isinstance(node, str): + for assignment in assignments: + if isinstance(assignment, Assignment) and _is_assignment( + node, assignment.node + ): + return assignment.get_qualified_names_for(full_name) + + results = set() for assignment in assignments: - if isinstance(assignment, Assignment): - assignment_node = assignment.node - if isinstance(assignment_node, (cst.Import, cst.ImportFrom)): - results |= _NameUtil.find_qualified_name_for_import_alike( - assignment_node, full_name - ) - else: - results |= _NameUtil.find_qualified_name_for_non_import( - assignment, full_name - ) - elif isinstance(assignment, BuiltinAssignment): - results.add( - QualifiedName( - f"builtins.{assignment.name}", QualifiedNameSource.BUILTIN - ) - ) + results |= assignment.get_qualified_names_for(full_name) return results @property @@ -533,7 +593,42 @@ class Scope(abc.ABC): @property def accesses(self) -> Accesses: """Return an :class:`~libcst.metadata.Accesses` contains all accesses in current scope.""" - return Accesses(self._accesses) + return Accesses(self._accesses_by_name) + + +class BuiltinScope(Scope): + """ + A BuiltinScope represents python builtin declarations. 
See https://docs.python.org/3/library/builtins.html + """ + + def __init__(self, globals: Scope) -> None: + self.globals: Scope = globals # must be defined before Scope.__init__ is called + super().__init__(parent=self) + + def __contains__(self, name: str) -> bool: + return hasattr(builtins, name) + + def _resolve_scope_for_access( + self, name: str, from_scope: "Scope" + ) -> Set[BaseAssignment]: + if name in self._assignments: + return self._assignments[name] + if hasattr(builtins, name): + # note - we only see the builtin assignments during the deferred + # access resolution. unfortunately that means we have to create the + # assignment here, which can cause the set to mutate during iteration + self._assignments[name].add(BuiltinAssignment(name, self)) + return self._assignments[name] + return set() + + def record_global_overwrite(self, name: str) -> None: + raise NotImplementedError("global overwrite in builtin scope are not allowed") + + def record_nonlocal_overwrite(self, name: str) -> None: + raise NotImplementedError("declarations in builtin scope are not allowed") + + def _find_assignment_target(self, name: str) -> "Scope": + raise NotImplementedError("assignments in builtin scope are not allowed") class GlobalScope(Scope): @@ -542,21 +637,21 @@ class GlobalScope(Scope): """ def __init__(self) -> None: - self.globals: Scope = self # must be defined before Scope.__init__ is called - super().__init__(parent=self) + super().__init__(parent=BuiltinScope(self)) def __contains__(self, name: str) -> bool: - return hasattr(builtins, name) or ( - name in self._assignments and len(self._assignments[name]) > 0 - ) + if name in self._assignments: + return len(self._assignments[name]) > 0 + return name in self._next_visible_parent(self) - def __getitem__(self, name: str) -> Set[BaseAssignment]: - if hasattr(builtins, name): - if not any( - isinstance(i, BuiltinAssignment) for i in self._assignments[name] - ): - self._assignments[name].add(BuiltinAssignment(name, 
self)) - return self._assignments[name] + def _resolve_scope_for_access( + self, name: str, from_scope: "Scope" + ) -> Set[BaseAssignment]: + if name in self._assignments: + return self._assignments[name] + + parent = self._next_visible_parent(from_scope) + return parent[name] def record_global_overwrite(self, name: str) -> None: pass @@ -581,6 +676,8 @@ class LocalScope(Scope, abc.ABC): self.name = name self.node = node self._scope_overwrites = {} + # pyre-fixme[4]: Attribute `_name_prefix` of class `LocalScope` has type `str` but no type is specified. + self._name_prefix = self._make_name_prefix() def record_global_overwrite(self, name: str) -> None: self._scope_overwrites[name] = self.globals @@ -588,26 +685,38 @@ class LocalScope(Scope, abc.ABC): def record_nonlocal_overwrite(self, name: str) -> None: self._scope_overwrites[name] = self.parent - def record_assignment(self, name: str, node: cst.CSTNode) -> None: + def _find_assignment_target(self, name: str) -> "Scope": if name in self._scope_overwrites: - self._scope_overwrites[name]._record_assignment_as_parent(name, node) + scope = self._scope_overwrites[name] + return self._next_visible_parent(self, scope)._find_assignment_target(name) else: - super().record_assignment(name, node) + return super()._find_assignment_target(name) def __contains__(self, name: str) -> bool: if name in self._scope_overwrites: return name in self._scope_overwrites[name] if name in self._assignments: return len(self._assignments[name]) > 0 - return self.parent._contains_in_self_or_parent(name) + return name in self._next_visible_parent(self) - def __getitem__(self, name: str) -> Set[BaseAssignment]: + def _resolve_scope_for_access( + self, name: str, from_scope: "Scope" + ) -> Set[BaseAssignment]: if name in self._scope_overwrites: - return self._scope_overwrites[name]._getitem_from_self_or_parent(name) + scope = self._scope_overwrites[name] + return self._next_visible_parent( + from_scope, scope + )._resolve_scope_for_access(name, 
from_scope) if name in self._assignments: return self._assignments[name] else: - return self.parent._getitem_from_self_or_parent(name) + return self._next_visible_parent(from_scope)._resolve_scope_for_access( + name, from_scope + ) + + def _make_name_prefix(self) -> str: + # filter falsey strings out + return ".".join(filter(None, [self.parent._name_prefix, self.name, ""])) # even though we don't override the constructor. @@ -625,35 +734,12 @@ class ClassScope(LocalScope): When a class is defined, it creates a ClassScope. """ - def _record_assignment_as_parent(self, name: str, node: cst.CSTNode) -> None: - """ - Forward the assignment to parent. + def _is_visible_from_children(self, from_scope: "Scope") -> bool: + return from_scope.parent is self and isinstance(from_scope, AnnotationScope) - def outer_fn(): - v = ... # outer_fn's declaration - class InnerCls: - v = ... # shadows outer_fn's declaration - def inner_fn(): - nonlocal v - v = ... # this should actually refer to outer_fn's declaration - # and not to InnerCls's, because InnerCls's scope is - # hidden from its children. - - """ - self.parent._record_assignment_as_parent(name, node) - - def _getitem_from_self_or_parent(self, name: str) -> Set[BaseAssignment]: - """ - Class variables are only accessible using ClassName.attribute, cls.attribute, or - self.attribute in child scopes. They cannot be accessed with their bare names. - """ - return self.parent._getitem_from_self_or_parent(name) - - def _contains_in_self_or_parent(self, name: str) -> bool: - """ - See :meth:`_getitem_from_self_or_parent` - """ - return self.parent._contains_in_self_or_parent(name) + def _make_name_prefix(self) -> str: + # filter falsey strings out + return ".".join(filter(None, [self.parent._name_prefix, self.name])) # even though we don't override the constructor. 
@@ -669,14 +755,30 @@ class ComprehensionScope(LocalScope): # TODO: Assignment expressions (Python 3.8) will complicate ComprehensionScopes, # and will require us to handle such assignments as non-local. # https://www.python.org/dev/peps/pep-0572/#scope-of-the-target - pass + + def _make_name_prefix(self) -> str: + # filter falsey strings out + return ".".join(filter(None, [self.parent._name_prefix, ""])) + + +class AnnotationScope(LocalScope): + """ + Scopes used for type aliases and type parameters as defined by PEP-695. + + These scopes are created for type parameters using the special syntax, as well as + type aliases. See https://peps.python.org/pep-0695/#scoping-behavior for more. + """ + + def _make_name_prefix(self) -> str: + # these scopes are transparent for the purposes of qualified names + return self.parent._name_prefix # Generates dotted names from an Attribute or Name node: # Attribute(value=Name(value="a"), attr=Name(value="b")) -> ("a.b", "a") # each string has the corresponding CSTNode attached to it def _gen_dotted_names( - node: Union[cst.Attribute, cst.Name] + node: Union[cst.Attribute, cst.Name], ) -> Iterator[Tuple[str, Union[cst.Attribute, cst.Name]]]: if isinstance(node, cst.Name): yield node.value, node @@ -705,18 +807,49 @@ def _gen_dotted_names( yield from name_values +def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool: + """ + Returns true if ``node`` is part of the assignment at ``assignment_node``. + + Normally this is just a simple identity check, except for imports where the + assignment is attached to the entire import statement but we are interested in + ``Name`` nodes inside the statement. 
+ """ + if node is assignment_node: + return True + if isinstance(assignment_node, (cst.Import, cst.ImportFrom)): + aliases = assignment_node.names + if isinstance(aliases, cst.ImportStar): + return False + for alias in aliases: + if alias.name is node: + return True + asname = alias.asname + if asname is not None: + if asname.name is node: + return True + return False + + +@dataclass(frozen=True) +class DeferredAccess: + access: Access + enclosing_attribute: Optional[cst.Attribute] + enclosing_string_annotation: Optional[cst.BaseString] + + class ScopeVisitor(cst.CSTVisitor): # since it's probably not useful. That can makes this visitor cleaner. def __init__(self, provider: "ScopeProvider") -> None: + super().__init__() self.provider: ScopeProvider = provider self.scope: Scope = GlobalScope() - self.__deferred_accesses: List[Tuple[Access, Optional[cst.Attribute]]] = [] + self.__deferred_accesses: List[DeferredAccess] = [] self.__top_level_attribute_stack: List[Optional[cst.Attribute]] = [None] - self.__in_annotation: Set[ - Union[cst.Call, cst.Annotation, cst.Subscript] - ] = set() - self.__in_type_hint: Set[Union[cst.Call, cst.Annotation, cst.Subscript]] = set() + self.__in_annotation_stack: List[bool] = [False] + self.__in_type_hint_stack: List[bool] = [False] self.__in_ignored_subscript: Set[cst.Subscript] = set() + self.__last_string_annotation: Optional[cst.BaseString] = None self.__ignore_annotation: int = 0 @contextmanager @@ -750,11 +883,15 @@ class ScopeVisitor(cst.CSTVisitor): asname = name.asname if asname is not None: name_values = _gen_dotted_names(cst.ensure_type(asname.name, cst.Name)) + import_node_asname = asname.name else: name_values = _gen_dotted_names(name.name) + import_node_asname = name.name for name_value, _ in name_values: - self.scope.record_assignment(name_value, node) + self.scope.record_import_assignment( + name_value, node, import_node_asname + ) return False def visit_Import(self, node: cst.Import) -> Optional[bool]: @@ -773,30 
+910,34 @@ class ScopeVisitor(cst.CSTVisitor): def visit_Call(self, node: cst.Call) -> Optional[bool]: self.__top_level_attribute_stack.append(None) + self.__in_type_hint_stack.append(False) qnames = {qn.name for qn in self.scope.get_qualified_names_for(node)} if "typing.NewType" in qnames or "typing.TypeVar" in qnames: node.func.visit(self) - self.__in_type_hint.add(node) + self.__in_type_hint_stack[-1] = True for arg in node.args[1:]: arg.visit(self) return False if "typing.cast" in qnames: node.func.visit(self) - self.__in_type_hint.add(node) if len(node.args) > 0: + self.__in_type_hint_stack.append(True) node.args[0].visit(self) + self.__in_type_hint_stack.pop() + for arg in node.args[1:]: + arg.visit(self) return False return True def leave_Call(self, original_node: cst.Call) -> None: self.__top_level_attribute_stack.pop() - self.__in_type_hint.discard(original_node) + self.__in_type_hint_stack.pop() def visit_Annotation(self, node: cst.Annotation) -> Optional[bool]: - self.__in_annotation.add(node) + self.__in_annotation_stack.append(True) def leave_Annotation(self, original_node: cst.Annotation) -> None: - self.__in_annotation.discard(original_node) + self.__in_annotation_stack.pop() def visit_SimpleString(self, node: cst.SimpleString) -> Optional[bool]: self._handle_string_annotation(node) @@ -810,25 +951,39 @@ class ScopeVisitor(cst.CSTVisitor): ) -> bool: """Returns whether it successfully handled the string annotation""" if ( - self.__in_type_hint or self.__in_annotation + self.__in_type_hint_stack[-1] or self.__in_annotation_stack[-1] ) and not self.__in_ignored_subscript: value = node.evaluated_value if value: - mod = cst.parse_module(value) - mod.visit(self) + top_level_annotation = self.__last_string_annotation is None + if top_level_annotation: + self.__last_string_annotation = node + try: + mod = cst.parse_module(value) + mod.visit(self) + except cst.ParserSyntaxError: + # swallow string annotation parsing errors + # this is the same behavior as 
cPython + pass + if top_level_annotation: + self.__last_string_annotation = None return True return False def visit_Subscript(self, node: cst.Subscript) -> Optional[bool]: - qnames = {qn.name for qn in self.scope.get_qualified_names_for(node.value)} - if any(qn.startswith(("typing.", "typing_extensions.")) for qn in qnames): - self.__in_type_hint.add(node) - if "typing.Literal" in qnames or "typing_extensions.Literal" in qnames: - self.__in_ignored_subscript.add(node) + in_type_hint = False + if isinstance(node.value, cst.Name): + qnames = {qn.name for qn in self.scope.get_qualified_names_for(node.value)} + if any(qn.startswith(("typing.", "typing_extensions.")) for qn in qnames): + in_type_hint = True + if "typing.Literal" in qnames or "typing_extensions.Literal" in qnames: + self.__in_ignored_subscript.add(node) + + self.__in_type_hint_stack.append(in_type_hint) return True def leave_Subscript(self, original_node: cst.Subscript) -> None: - self.__in_type_hint.discard(original_node) + self.__in_type_hint_stack.pop() self.__in_ignored_subscript.discard(original_node) def visit_Name(self, node: cst.Name) -> Optional[bool]: @@ -841,27 +996,38 @@ class ScopeVisitor(cst.CSTVisitor): node, self.scope, is_annotation=bool( - self.__in_annotation and not self.__ignore_annotation + self.__in_annotation_stack[-1] and not self.__ignore_annotation ), - is_type_hint=bool(self.__in_type_hint), + is_type_hint=bool(self.__in_type_hint_stack[-1]), ) self.__deferred_accesses.append( - (access, self.__top_level_attribute_stack[-1]) + DeferredAccess( + access=access, + enclosing_attribute=self.__top_level_attribute_stack[-1], + enclosing_string_annotation=self.__last_string_annotation, + ) ) def visit_FunctionDef(self, node: cst.FunctionDef) -> Optional[bool]: self.scope.record_assignment(node.name.value, node) self.provider.set_metadata(node.name, self.scope) - with self._new_scope(FunctionScope, node, get_full_name_for_node(node.name)): - node.params.visit(self) - 
node.body.visit(self) + with ExitStack() as stack: + if node.type_parameters: + stack.enter_context(self._new_scope(AnnotationScope, node, None)) + node.type_parameters.visit(self) - for decorator in node.decorators: - decorator.visit(self) - returns = node.returns - if returns: - returns.visit(self) + with self._new_scope( + FunctionScope, node, get_full_name_for_node(node.name) + ): + node.params.visit(self) + node.body.visit(self) + + for decorator in node.decorators: + decorator.visit(self) + returns = node.returns + if returns: + returns.visit(self) return False @@ -893,14 +1059,20 @@ class ScopeVisitor(cst.CSTVisitor): self.provider.set_metadata(node.name, self.scope) for decorator in node.decorators: decorator.visit(self) - for base in node.bases: - base.visit(self) - for keyword in node.keywords: - keyword.visit(self) - with self._new_scope(ClassScope, node, get_full_name_for_node(node.name)): - for statement in node.body.body: - statement.visit(self) + with ExitStack() as stack: + if node.type_parameters: + stack.enter_context(self._new_scope(AnnotationScope, node, None)) + node.type_parameters.visit(self) + + for base in node.bases: + base.visit(self) + for keyword in node.keywords: + keyword.visit(self) + + with self._new_scope(ClassScope, node, get_full_name_for_node(node.name)): + for statement in node.body.body: + statement.visit(self) return False def visit_ClassDef_bases(self, node: cst.ClassDef) -> None: @@ -973,6 +1145,8 @@ class ScopeVisitor(cst.CSTVisitor): self.provider.set_metadata(for_in, self.scope) with self._new_scope(ComprehensionScope, node): for_in.target.visit(self) + # Things from here on can refer to the target. 
+ self.scope._assignment_count += 1 for condition in for_in.ifs: condition.visit(self) inner_for_in = for_in.inner_for_in @@ -985,12 +1159,25 @@ class ScopeVisitor(cst.CSTVisitor): node.elt.visit(self) return False + def visit_For(self, node: cst.For) -> Optional[bool]: + node.target.visit(self) + self.scope._assignment_count += 1 + for child in [node.iter, node.body, node.orelse, node.asynchronous]: + if child is not None: + child.visit(self) + return False + def infer_accesses(self) -> None: # Aggregate access with the same name and batch add with set union as an optimization. # In worst case, all accesses (m) and assignments (n) refer to the same name, # the time complexity is O(m x n), this optimizes it as O(m + n). scope_name_accesses = defaultdict(set) - for (access, enclosing_attribute) in self.__deferred_accesses: + for def_access in self.__deferred_accesses: + access, enclosing_attribute, enclosing_string_annotation = ( + def_access.access, + def_access.enclosing_attribute, + def_access.enclosing_string_annotation, + ) name = ensure_type(access.node, cst.Name).value if enclosing_attribute is not None: # if _gen_dotted_names doesn't generate any values, fall back to @@ -1001,12 +1188,15 @@ class ScopeVisitor(cst.CSTVisitor): name = attr_name break + if enclosing_string_annotation is not None: + access.node = enclosing_string_annotation + scope_name_accesses[(access.scope, name)].add(access) access.record_assignments(name) access.scope.record_access(name, access) for (scope, name), accesses in scope_name_accesses.items(): - for assignment in scope[name]: + for assignment in scope._resolve_scope_for_access(name, scope): assignment.record_accesses(accesses) self.__deferred_accesses = [] @@ -1017,6 +1207,32 @@ class ScopeVisitor(cst.CSTVisitor): self.scope._assignment_count += 1 super().on_leave(original_node) + def visit_TypeAlias(self, node: cst.TypeAlias) -> Optional[bool]: + self.scope.record_assignment(node.name.value, node) + + with 
self._new_scope(AnnotationScope, node, None): + if node.type_parameters is not None: + node.type_parameters.visit(self) + node.value.visit(self) + + return False + + def visit_TypeVar(self, node: cst.TypeVar) -> Optional[bool]: + self.scope.record_assignment(node.name.value, node) + + if node.bound is not None: + node.bound.visit(self) + + return False + + def visit_TypeVarTuple(self, node: cst.TypeVarTuple) -> Optional[bool]: + self.scope.record_assignment(node.name.value, node) + return False + + def visit_ParamSpec(self, node: cst.ParamSpec) -> Optional[bool]: + self.scope.record_assignment(node.name.value, node) + return False + class ScopeProvider(BatchableMetadataProvider[Optional[Scope]]): """ @@ -1036,70 +1252,3 @@ class ScopeProvider(BatchableMetadataProvider[Optional[Scope]]): visitor = ScopeVisitor(self) node.visit(visitor) visitor.infer_accesses() - - -class QualifiedNameVisitor(cst.CSTVisitor): - def __init__(self, provider: "QualifiedNameProvider") -> None: - self.provider: QualifiedNameProvider = provider - - def on_visit(self, node: cst.CSTNode) -> bool: - scope = self.provider.get_metadata(ScopeProvider, node, None) - if scope: - self.provider.set_metadata(node, scope.get_qualified_names_for(node)) - else: - self.provider.set_metadata(node, set()) - super().on_visit(node) - return True - - -class QualifiedNameProvider(BatchableMetadataProvider[Collection[QualifiedName]]): - """ - Compute possible qualified names of a variable CSTNode - (extends `PEP-3155 `_). - It uses the - :func:`~libcst.metadata.Scope.get_qualified_names_for` underlying to get qualified names. - Multiple qualified names may be returned, such as when we have conditional imports or an - import shadows another. 
E.g., the provider finds ``a.b``, ``d.e`` and - ``f.g`` as possible qualified names of ``c``:: - - >>> wrapper = MetadataWrapper( - >>> cst.parse_module(dedent( - >>> ''' - >>> if something: - >>> from a import b as c - >>> elif otherthing: - >>> from d import e as c - >>> else: - >>> from f import g as c - >>> c() - >>> ''' - >>> )) - >>> ) - >>> call = wrapper.module.body[1].body[0].value - >>> wrapper.resolve(QualifiedNameProvider)[call], - { - QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), - QualifiedName(name="d.e", source=QualifiedNameSource.IMPORT), - QualifiedName(name="f.g", source=QualifiedNameSource.IMPORT), - } - - For qualified name of a variable in a function or a comprehension, please refer - :func:`~libcst.metadata.Scope.get_qualified_names_for` for more detail. - """ - - METADATA_DEPENDENCIES = (ScopeProvider,) - - def visit_Module(self, node: cst.Module) -> Optional[bool]: - visitor = QualifiedNameVisitor(self) - node.visit(visitor) - - @staticmethod - def has_name( - visitor: MetadataDependent, node: cst.CSTNode, name: Union[str, QualifiedName] - ) -> bool: - """Check if any of qualified name has the str name or :class:`~libcst.metadata.QualifiedName` name.""" - qualified_names = visitor.get_metadata(QualifiedNameProvider, node, set()) - if isinstance(name, str): - return any(qn.name == name for qn in qualified_names) - else: - return any(qn == name for qn in qualified_names) diff --git a/libcst/metadata/span_provider.py b/libcst/metadata/span_provider.py index 400ec284..39e7b86c 100644 --- a/libcst/metadata/span_provider.py +++ b/libcst/metadata/span_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/metadata/tests/__init__.py b/libcst/metadata/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/metadata/tests/__init__.py +++ b/libcst/metadata/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/metadata/tests/test_accessor_provider.py b/libcst/metadata/tests/test_accessor_provider.py new file mode 100644 index 00000000..6ccfad5e --- /dev/null +++ b/libcst/metadata/tests/test_accessor_provider.py @@ -0,0 +1,68 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import dataclasses + +from textwrap import dedent + +import libcst as cst +from libcst.metadata import AccessorProvider, MetadataWrapper +from libcst.testing.utils import data_provider, UnitTest + + +class DependentVisitor(cst.CSTVisitor): + METADATA_DEPENDENCIES = (AccessorProvider,) + + def __init__(self, *, test: UnitTest) -> None: + self.test = test + + def on_visit(self, node: cst.CSTNode) -> bool: + for f in dataclasses.fields(node): + child = getattr(node, f.name) + if type(child) is cst.CSTNode: + accessor = self.get_metadata(AccessorProvider, child) + self.test.assertEqual(accessor, f.name) + + return True + + +class AccessorProviderTest(UnitTest): + @data_provider( + ( + ( + """ + foo = 'toplevel' + fn1(foo) + fn2(foo) + def fn_def(): + foo = 'shadow' + fn3(foo) + """, + ), + ( + """ + global_var = None + @cls_attr + class Cls(cls_attr, kwarg=cls_attr): + cls_attr = 5 + def f(): + pass + """, + ), + ( + """ + iterator = None + condition = None + [elt for target in iterator if condition] + {elt for target in iterator if condition} + {elt: target for target in iterator if condition} + (elt for target in 
iterator if condition) + """, + ), + ) + ) + def test_accessor_provier(self, code: str) -> None: + wrapper = MetadataWrapper(cst.parse_module(dedent(code))) + wrapper.visit(DependentVisitor(test=self)) diff --git a/libcst/metadata/tests/test_base_provider.py b/libcst/metadata/tests/test_base_provider.py index e19a4571..26ebde70 100644 --- a/libcst/metadata/tests/test_base_provider.py +++ b/libcst/metadata/tests/test_base_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -7,6 +7,7 @@ from typing import cast import libcst as cst from libcst import parse_module +from libcst._metadata_dependent import LazyValue from libcst.metadata import ( BatchableMetadataProvider, MetadataWrapper, @@ -75,3 +76,63 @@ class BaseMetadataProviderTest(UnitTest): self.assertEqual(metadata[SimpleProvider][pass_], 1) self.assertEqual(metadata[SimpleProvider][return_], 2) self.assertEqual(metadata[SimpleProvider][pass_2], 1) + + def test_lazy_visitor_provider(self) -> None: + class SimpleLazyProvider(VisitorMetadataProvider[int]): + """ + Sets metadata on every node to a callable that returns 1. 
+ """ + + def on_visit(self, node: cst.CSTNode) -> bool: + self.set_metadata(node, LazyValue(lambda: 1)) + return True + + wrapper = MetadataWrapper(parse_module("pass; return")) + module = wrapper.module + pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] + return_ = cast(cst.SimpleStatementLine, module.body[0]).body[1] + + provider = SimpleLazyProvider() + metadata = provider._gen(wrapper) + + # Check access on provider + self.assertEqual(provider.get_metadata(SimpleLazyProvider, module), 1) + self.assertEqual(provider.get_metadata(SimpleLazyProvider, pass_), 1) + self.assertEqual(provider.get_metadata(SimpleLazyProvider, return_), 1) + + # Check returned mapping + self.assertTrue(isinstance(metadata[module], LazyValue)) + self.assertTrue(isinstance(metadata[pass_], LazyValue)) + self.assertTrue(isinstance(metadata[return_], LazyValue)) + + def testlazy_batchable_provider(self) -> None: + class SimpleLazyProvider(BatchableMetadataProvider[int]): + """ + Sets metadata on every pass node to a callable that returns 1, + and every return node to a callable that returns 2. 
+ """ + + def visit_Pass(self, node: cst.Pass) -> None: + self.set_metadata(node, LazyValue(lambda: 1)) + + def visit_Return(self, node: cst.Return) -> None: + self.set_metadata(node, LazyValue(lambda: 2)) + + wrapper = MetadataWrapper(parse_module("pass; return; pass")) + module = wrapper.module + pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] + return_ = cast(cst.SimpleStatementLine, module.body[0]).body[1] + pass_2 = cast(cst.SimpleStatementLine, module.body[0]).body[2] + + provider = SimpleLazyProvider() + metadata = _gen_batchable(wrapper, [provider]) + + # Check access on provider + self.assertEqual(provider.get_metadata(SimpleLazyProvider, pass_), 1) + self.assertEqual(provider.get_metadata(SimpleLazyProvider, return_), 2) + self.assertEqual(provider.get_metadata(SimpleLazyProvider, pass_2), 1) + + # Check returned mapping + self.assertTrue(isinstance(metadata[SimpleLazyProvider][pass_], LazyValue)) + self.assertTrue(isinstance(metadata[SimpleLazyProvider][return_], LazyValue)) + self.assertTrue(isinstance(metadata[SimpleLazyProvider][pass_2], LazyValue)) diff --git a/libcst/metadata/tests/test_expression_context_provider.py b/libcst/metadata/tests/test_expression_context_provider.py index 25cc1d0d..88ed0a88 100644 --- a/libcst/metadata/tests/test_expression_context_provider.py +++ b/libcst/metadata/tests/test_expression_context_provider.py @@ -1,11 +1,11 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from textwrap import dedent -from typing import Dict, Optional, cast +from typing import cast, Dict, Optional import libcst as cst from libcst import parse_module @@ -411,3 +411,23 @@ class ExpressionContextProviderTest(UnitTest): }, ) ) + + def test_walrus(self) -> None: + code = """ + if x := y: + pass + """ + wrapper = MetadataWrapper( + parse_module( + dedent(code), config=cst.PartialParserConfig(python_version="3.8") + ) + ) + wrapper.visit( + DependentVisitor( + test=self, + name_to_context={ + "x": ExpressionContext.STORE, + "y": ExpressionContext.LOAD, + }, + ) + ) diff --git a/libcst/metadata/tests/test_file_path_provider.py b/libcst/metadata/tests/test_file_path_provider.py new file mode 100644 index 00000000..2b0631f5 --- /dev/null +++ b/libcst/metadata/tests/test_file_path_provider.py @@ -0,0 +1,145 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Set + +import libcst +from libcst._visitors import CSTVisitor +from libcst.helpers.paths import chdir +from libcst.metadata import FilePathProvider, FullRepoManager, MetadataWrapper +from libcst.testing.utils import UnitTest + + +class FilePathProviderTest(UnitTest): + def setUp(self) -> None: + self.td = TemporaryDirectory() + self.tdp = Path(self.td.name).resolve() + self.addCleanup(self.td.cleanup) + + def test_provider_cache(self) -> None: + pkg = self.tdp / "pkg" + pkg.mkdir() + files = [Path(pkg / name) for name in ("file1.py", "file2.py", "file3.py")] + [file.write_text("print('hello')\n") for file in files] + + with self.subTest("absolute paths"): + repo_manager = FullRepoManager( + self.tdp, [f.as_posix() for f in files], {FilePathProvider} + ) + repo_manager.resolve_cache() + + expected = { + FilePathProvider: {f.as_posix(): f for f in files}, + } + self.assertDictEqual(expected, repo_manager.cache) + + with self.subTest("repo relative paths"): + repo_manager = FullRepoManager( + self.tdp, + [f.relative_to(self.tdp).as_posix() for f in files], + {FilePathProvider}, + ) + repo_manager.resolve_cache() + + expected = { + FilePathProvider: { + f.relative_to(self.tdp).as_posix(): f for f in files + }, + } + self.assertDictEqual(expected, repo_manager.cache) + + with self.subTest("dot relative paths"): + with chdir(self.tdp): + repo_manager = FullRepoManager( + ".", + [f.relative_to(self.tdp).as_posix() for f in files], + {FilePathProvider}, + ) + repo_manager.resolve_cache() + + expected = { + FilePathProvider: { + f.relative_to(self.tdp).as_posix(): f for f in files + }, + } + self.assertDictEqual(expected, repo_manager.cache) + + def test_visitor(self) -> None: + pkg = self.tdp / "pkg" + pkg.mkdir() + files = [Path(pkg / name) for name in ("file1.py", "file2.py", "file3.py")] + [file.write_text("print('hello')\n") for file in files] + + seen: Set[Path] = set() + + class 
FakeVisitor(CSTVisitor): + METADATA_DEPENDENCIES = [FilePathProvider] + + def visit_Module(self, node: libcst.Module) -> None: + seen.add(self.get_metadata(FilePathProvider, node)) + + with self.subTest("absolute paths"): + seen.clear() + repo_manager = FullRepoManager( + self.tdp, [f.as_posix() for f in files], {FilePathProvider} + ) + repo_manager.resolve_cache() + + for file in files: + module = libcst.parse_module(file.read_bytes()) + wrapper = MetadataWrapper( + module, cache=repo_manager.get_cache_for_path(file.as_posix()) + ) + wrapper.visit(FakeVisitor()) + + expected = set(files) + self.assertSetEqual(expected, seen) + + with self.subTest("repo relative paths"): + seen.clear() + repo_manager = FullRepoManager( + self.tdp, + [f.relative_to(self.tdp).as_posix() for f in files], + {FilePathProvider}, + ) + repo_manager.resolve_cache() + + for file in files: + module = libcst.parse_module(file.read_bytes()) + wrapper = MetadataWrapper( + module, + cache=repo_manager.get_cache_for_path( + file.relative_to(self.tdp).as_posix() + ), + ) + wrapper.visit(FakeVisitor()) + + expected = set(files) + self.assertSetEqual(expected, seen) + + with self.subTest("dot relative paths"): + with chdir(self.tdp): + seen.clear() + repo_manager = FullRepoManager( + ".", + [f.relative_to(self.tdp).as_posix() for f in files], + {FilePathProvider}, + ) + repo_manager.resolve_cache() + + for file in files: + module = libcst.parse_module(file.read_bytes()) + wrapper = MetadataWrapper( + module, + cache=repo_manager.get_cache_for_path( + file.relative_to(self.tdp).as_posix() + ), + ) + wrapper.visit(FakeVisitor()) + + expected = set(files) + self.assertSetEqual(expected, seen) diff --git a/libcst/metadata/tests/test_full_repo_manager.py b/libcst/metadata/tests/test_full_repo_manager.py index ff7ebfd0..27066f5a 100644 --- a/libcst/metadata/tests/test_full_repo_manager.py +++ b/libcst/metadata/tests/test_full_repo_manager.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. 
and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -12,7 +12,6 @@ from libcst.metadata.tests.test_type_inference_provider import _test_simple_clas from libcst.metadata.type_inference_provider import TypeInferenceProvider from libcst.testing.utils import UnitTest - REPO_ROOT_DIR: str = str(Path(__file__).parent.parent.parent.resolve()) diff --git a/libcst/metadata/tests/test_metadata_provider.py b/libcst/metadata/tests/test_metadata_provider.py index 4f7be175..7de94851 100644 --- a/libcst/metadata/tests/test_metadata_provider.py +++ b/libcst/metadata/tests/test_metadata_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/metadata/tests/test_metadata_wrapper.py b/libcst/metadata/tests/test_metadata_wrapper.py index 731a910f..9063a99a 100644 --- a/libcst/metadata/tests/test_metadata_wrapper.py +++ b/libcst/metadata/tests/test_metadata_wrapper.py @@ -1,13 +1,18 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
from typing import Optional +from unittest.mock import Mock import libcst as cst -from libcst.metadata import BatchableMetadataProvider, MetadataWrapper +from libcst.metadata import ( + BatchableMetadataProvider, + MetadataWrapper, + VisitorMetadataProvider, +) from libcst.testing.utils import UnitTest @@ -43,9 +48,13 @@ class MetadataWrapperTest(UnitTest): self.assertNotEqual(hash(mw1), hash(mw3)) self.assertNotEqual(hash(mw2), hash(mw3)) + @staticmethod + def ignore_args(*args: object, **kwargs: object) -> tuple[object, ...]: + return (args, kwargs) + def test_metadata_cache(self) -> None: class DummyMetadataProvider(BatchableMetadataProvider[None]): - gen_cache = tuple + gen_cache = self.ignore_args m = cst.parse_module("pass") mw = MetadataWrapper(m) @@ -55,7 +64,7 @@ class MetadataWrapperTest(UnitTest): mw.resolve(DummyMetadataProvider) class SimpleCacheMetadataProvider(BatchableMetadataProvider[object]): - gen_cache = tuple + gen_cache = self.ignore_args def __init__(self, cache: object) -> None: super().__init__(cache) @@ -65,11 +74,57 @@ class MetadataWrapperTest(UnitTest): self.set_metadata(node, self.cache) cached_data = object() - # pyre-fixme[6]: Expected `Mapping[Type[BaseMetadataProvider[object]], - # object]` for 2nd param but got `Dict[Type[SimpleCacheMetadataProvider], - # object]`. 
mw = MetadataWrapper(m, cache={SimpleCacheMetadataProvider: cached_data}) pass_node = cst.ensure_type(mw.module.body[0], cst.SimpleStatementLine).body[0] self.assertEqual( mw.resolve(SimpleCacheMetadataProvider)[pass_node], cached_data ) + + def test_resolve_provider_twice(self) -> None: + """ + Tests that resolving the same provider twice is a no-op + """ + mock = Mock() + + class ProviderA(VisitorMetadataProvider[bool]): + def visit_Pass(self, node: cst.Pass) -> None: + mock.visited_a() + + module = cst.parse_module("pass") + wrapper = MetadataWrapper(module) + + wrapper.resolve(ProviderA) + mock.visited_a.assert_called_once() + + wrapper.resolve(ProviderA) + mock.visited_a.assert_called_once() + + def test_resolve_dependent_provider_twice(self) -> None: + """ + Tests that resolving the same provider twice is a no-op + """ + mock = Mock() + + class ProviderA(VisitorMetadataProvider[bool]): + def visit_Pass(self, node: cst.Pass) -> None: + mock.visited_a() + + class ProviderB(VisitorMetadataProvider[bool]): + METADATA_DEPENDENCIES = (ProviderA,) + + def visit_Pass(self, node: cst.Pass) -> None: + mock.visited_b() + + module = cst.parse_module("pass") + wrapper = MetadataWrapper(module) + + wrapper.resolve(ProviderA) + mock.visited_a.assert_called_once() + + wrapper.resolve(ProviderB) + mock.visited_a.assert_called_once() + mock.visited_b.assert_called_once() + + wrapper.resolve(ProviderA) + mock.visited_a.assert_called_once() + mock.visited_b.assert_called_once() diff --git a/libcst/metadata/tests/test_name_provider.py b/libcst/metadata/tests/test_name_provider.py new file mode 100644 index 00000000..fbd3631a --- /dev/null +++ b/libcst/metadata/tests/test_name_provider.py @@ -0,0 +1,575 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +from pathlib import Path +from tempfile import TemporaryDirectory +from textwrap import dedent +from typing import Collection, Dict, Mapping, Optional, Set, Tuple + +import libcst as cst +from libcst import ensure_type +from libcst._nodes.base import CSTNode +from libcst.metadata import ( + FullyQualifiedNameProvider, + MetadataWrapper, + QualifiedName, + QualifiedNameProvider, + QualifiedNameSource, +) +from libcst.metadata.full_repo_manager import FullRepoManager +from libcst.metadata.name_provider import FullyQualifiedNameVisitor +from libcst.testing.utils import data_provider, UnitTest + + +class QNameVisitor(cst.CSTVisitor): + METADATA_DEPENDENCIES = (QualifiedNameProvider,) + + def __init__(self) -> None: + self.qnames: Dict["CSTNode", Collection[QualifiedName]] = {} + + def on_visit(self, node: cst.CSTNode) -> bool: + qname = self.get_metadata(QualifiedNameProvider, node) + self.qnames[node] = qname + return True + + +def get_qualified_name_metadata_provider( + module_str: str, +) -> Tuple[cst.Module, Mapping[cst.CSTNode, Collection[QualifiedName]]]: + wrapper = MetadataWrapper(cst.parse_module(dedent(module_str))) + visitor = QNameVisitor() + wrapper.visit(visitor) + return wrapper.module, visitor.qnames + + +def get_qualified_names(module_str: str) -> Set[QualifiedName]: + _, qnames_map = get_qualified_name_metadata_provider(module_str) + return {qname for qnames in qnames_map.values() for qname in qnames} + + +def get_fully_qualified_names(file_path: str, module_str: str) -> Set[QualifiedName]: + wrapper = cst.MetadataWrapper( + cst.parse_module(dedent(module_str)), + cache={ + FullyQualifiedNameProvider: FullyQualifiedNameProvider.gen_cache( + Path(""), [file_path], timeout=None + ).get(file_path, "") + }, + ) + return { + qname + for qnames in wrapper.resolve(FullyQualifiedNameProvider).values() + for qname in qnames + } + + +class QualifiedNameProviderTest(UnitTest): + def test_imports(self) -> None: + qnames = get_qualified_names( + """ + from a.b 
import c as d + d + """ + ) + self.assertEqual({"a.b.c"}, {qname.name for qname in qnames}) + for qname in qnames: + self.assertEqual(qname.source, QualifiedNameSource.IMPORT, msg=f"{qname}") + + def test_builtins(self) -> None: + qnames = get_qualified_names( + """ + int(None) + """ + ) + self.assertEqual( + {"builtins.int", "builtins.None"}, {qname.name for qname in qnames} + ) + for qname in qnames: + self.assertEqual(qname.source, QualifiedNameSource.BUILTIN, msg=f"{qname}") + + def test_locals(self) -> None: + qnames = get_qualified_names( + """ + class X: + a: "X" + """ + ) + self.assertEqual({"X", "X.a"}, {qname.name for qname in qnames}) + for qname in qnames: + self.assertEqual(qname.source, QualifiedNameSource.LOCAL, msg=f"{qname}") + + def test_simple_qualified_names(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + from a.b import c + class Cls: + def f(self) -> "c": + c() + d = {} + d['key'] = 0 + def g(): + pass + g() + """ + ) + cls = ensure_type(m.body[1], cst.ClassDef) + f = ensure_type(cls.body.body[0], cst.FunctionDef) + self.assertEqual( + names[ensure_type(f.returns, cst.Annotation).annotation], + {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)}, + ) + + c_call = ensure_type( + ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr + ).value + self.assertEqual( + names[c_call], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)} + ) + self.assertEqual( + names[c_call], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)} + ) + + g_call = ensure_type( + ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Expr + ).value + self.assertEqual(names[g_call], {QualifiedName("g", QualifiedNameSource.LOCAL)}) + d_name = ( + ensure_type( + ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], cst.Assign + ) + .targets[0] + .target + ) + self.assertEqual( + names[d_name], + {QualifiedName("Cls.f..d", QualifiedNameSource.LOCAL)}, + ) + d_subscript = ( + ensure_type( + ensure_type(f.body.body[2], 
cst.SimpleStatementLine).body[0], cst.Assign + ) + .targets[0] + .target + ) + self.assertEqual( + names[d_subscript], + {QualifiedName("Cls.f..d", QualifiedNameSource.LOCAL)}, + ) + + def test_nested_qualified_names(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + class A: + def f1(self): + def f2(): + pass + f2() + + def f3(self): + class B(): + ... + B() + def f4(): + def f5(): + class C: + pass + C() + f5() + """ + ) + + cls_a = ensure_type(m.body[0], cst.ClassDef) + self.assertEqual(names[cls_a], {QualifiedName("A", QualifiedNameSource.LOCAL)}) + func_f1 = ensure_type(cls_a.body.body[0], cst.FunctionDef) + self.assertEqual( + names[func_f1], {QualifiedName("A.f1", QualifiedNameSource.LOCAL)} + ) + func_f2_call = ensure_type( + ensure_type(func_f1.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr + ).value + self.assertEqual( + names[func_f2_call], + {QualifiedName("A.f1..f2", QualifiedNameSource.LOCAL)}, + ) + func_f3 = ensure_type(cls_a.body.body[1], cst.FunctionDef) + self.assertEqual( + names[func_f3], {QualifiedName("A.f3", QualifiedNameSource.LOCAL)} + ) + call_b = ensure_type( + ensure_type(func_f3.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr + ).value + self.assertEqual( + names[call_b], {QualifiedName("A.f3..B", QualifiedNameSource.LOCAL)} + ) + func_f4 = ensure_type(m.body[1], cst.FunctionDef) + self.assertEqual( + names[func_f4], {QualifiedName("f4", QualifiedNameSource.LOCAL)} + ) + func_f5 = ensure_type(func_f4.body.body[0], cst.FunctionDef) + self.assertEqual( + names[func_f5], {QualifiedName("f4..f5", QualifiedNameSource.LOCAL)} + ) + cls_c = func_f5.body.body[0] + self.assertEqual( + names[cls_c], + {QualifiedName("f4..f5..C", QualifiedNameSource.LOCAL)}, + ) + + def test_multiple_assignments(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + if 1: + from a import b as c + elif 2: + from d import e as c + c() + """ + ) + call = ensure_type( + ensure_type(m.body[1], 
cst.SimpleStatementLine).body[0], cst.Expr + ).value + self.assertEqual( + names[call], + { + QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), + QualifiedName(name="d.e", source=QualifiedNameSource.IMPORT), + }, + ) + + def test_comprehension(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + class C: + def fn(self) -> None: + [[k for k in i] for i in [j for j in range(10)]] + # Note: + # The qualified name of i is straightforward to be "C.fn...i". + # ListComp j is evaluated outside of the ListComp i. + # so j has qualified name "C.fn...j". + # ListComp k is evaluated inside ListComp i. + # so k has qualified name "C.fn....k". + """ + ) + cls_def = ensure_type(m.body[0], cst.ClassDef) + fn_def = ensure_type(cls_def.body.body[0], cst.FunctionDef) + outer_comp = ensure_type( + ensure_type( + ensure_type(fn_def.body.body[0], cst.SimpleStatementLine).body[0], + cst.Expr, + ).value, + cst.ListComp, + ) + i = outer_comp.for_in.target + self.assertEqual( + names[i], + { + QualifiedName( + name="C.fn...i", + source=QualifiedNameSource.LOCAL, + ) + }, + ) + inner_comp_j = ensure_type(outer_comp.for_in.iter, cst.ListComp) + j = inner_comp_j.for_in.target + self.assertEqual( + names[j], + { + QualifiedName( + name="C.fn...j", + source=QualifiedNameSource.LOCAL, + ) + }, + ) + inner_comp_k = ensure_type(outer_comp.elt, cst.ListComp) + k = inner_comp_k.for_in.target + self.assertEqual( + names[k], + { + QualifiedName( + name="C.fn....k", + source=QualifiedNameSource.LOCAL, + ) + }, + ) + + def test_has_name_helper(self) -> None: + class TestVisitor(cst.CSTVisitor): + METADATA_DEPENDENCIES = (QualifiedNameProvider,) + + def __init__(self, test: UnitTest) -> None: + self.test = test + + def visit_Call(self, node: cst.Call) -> Optional[bool]: + self.test.assertTrue( + QualifiedNameProvider.has_name(self, node, "a.b.c") + ) + self.test.assertFalse(QualifiedNameProvider.has_name(self, node, "a.b")) + self.test.assertTrue( + 
QualifiedNameProvider.has_name( + self, node, QualifiedName("a.b.c", QualifiedNameSource.IMPORT) + ) + ) + self.test.assertFalse( + QualifiedNameProvider.has_name( + self, node, QualifiedName("a.b.c", QualifiedNameSource.LOCAL) + ) + ) + + MetadataWrapper(cst.parse_module("import a;a.b.c()")).visit(TestVisitor(self)) + + def test_name_in_attribute(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + obj = object() + obj.eval + """ + ) + attr = ensure_type( + ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr + ).value, + cst.Attribute, + ) + self.assertEqual( + names[attr], + {QualifiedName(name="obj.eval", source=QualifiedNameSource.LOCAL)}, + ) + eval = attr.attr + self.assertEqual(names[eval], set()) + + def test_repeated_values_in_qualified_name(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + import a + class Foo: + bar: a.aa.aaa + """ + ) + foo = ensure_type(m.body[1], cst.ClassDef) + bar = ensure_type( + ensure_type( + ensure_type(foo.body, cst.IndentedBlock).body[0], + cst.SimpleStatementLine, + ).body[0], + cst.AnnAssign, + ) + + annotation = ensure_type(bar.annotation, cst.Annotation) + attribute = ensure_type(annotation.annotation, cst.Attribute) + + self.assertEqual( + names[attribute], {QualifiedName("a.aa.aaa", QualifiedNameSource.IMPORT)} + ) + + def test_multiple_qualified_names(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + if False: + def f(): pass + elif False: + from b import f + else: + import f + import a.b as f + + f() + """ + ) + if_ = ensure_type(m.body[0], cst.If) + first_f = ensure_type(if_.body.body[0], cst.FunctionDef) + second_f_alias = ensure_type( + ensure_type( + ensure_type(if_.orelse, cst.If).body.body[0], + cst.SimpleStatementLine, + ).body[0], + cst.ImportFrom, + ).names + self.assertFalse(isinstance(second_f_alias, cst.ImportStar)) + second_f = second_f_alias[0].name + third_f_alias = ensure_type( + ensure_type( + 
ensure_type(ensure_type(if_.orelse, cst.If).orelse, cst.Else).body.body[ + 0 + ], + cst.SimpleStatementLine, + ).body[0], + cst.Import, + ).names + self.assertFalse(isinstance(third_f_alias, cst.ImportStar)) + third_f = third_f_alias[0].name + fourth_f = ensure_type( + ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Import + ) + .names[0] + .asname, + cst.AsName, + ).name + call = ensure_type( + ensure_type( + ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr + ).value, + cst.Call, + ) + + self.assertEqual( + names[first_f], {QualifiedName("f", QualifiedNameSource.LOCAL)} + ) + self.assertEqual(names[second_f], set()) + self.assertEqual(names[third_f], set()) + self.assertEqual(names[fourth_f], set()) + self.assertEqual( + names[call], + { + QualifiedName("f", QualifiedNameSource.IMPORT), + QualifiedName("b.f", QualifiedNameSource.IMPORT), + QualifiedName("f", QualifiedNameSource.LOCAL), + QualifiedName("a.b", QualifiedNameSource.IMPORT), + }, + ) + + def test_shadowed_assignments(self) -> None: + m, names = get_qualified_name_metadata_provider( + """ + from lib import a,b,c + a = a + class Test: + b = b + def func(): + c = c + """ + ) + + # pyre-fixme[53]: Captured variable `names` is not annotated. 
+ def test_name(node: cst.CSTNode, qnames: Set[QualifiedName]) -> None: + name = ensure_type( + ensure_type(node, cst.SimpleStatementLine).body[0], cst.Assign + ).value + self.assertEqual(names[name], qnames) + + test_name(m.body[1], {QualifiedName("lib.a", QualifiedNameSource.IMPORT)}) + + cls = ensure_type(m.body[2], cst.ClassDef) + test_name( + cls.body.body[0], {QualifiedName("lib.b", QualifiedNameSource.IMPORT)} + ) + + func = ensure_type(m.body[3], cst.FunctionDef) + test_name( + func.body.body[0], {QualifiedName("lib.c", QualifiedNameSource.IMPORT)} + ) + + +class FullyQualifiedNameProviderTest(UnitTest): + @data_provider( + ( + # test module names + ("a/b/c.py", "", {"a.b.c": QualifiedNameSource.LOCAL}), + ("a/b.py", "", {"a.b": QualifiedNameSource.LOCAL}), + ("a.py", "", {"a": QualifiedNameSource.LOCAL}), + ("a/b/__init__.py", "", {"a.b": QualifiedNameSource.LOCAL}), + ("a/b/__main__.py", "", {"a.b": QualifiedNameSource.LOCAL}), + # test builtinxsx + ( + "test/module.py", + "int(None)", + { + "test.module": QualifiedNameSource.LOCAL, + "builtins.int": QualifiedNameSource.BUILTIN, + "builtins.None": QualifiedNameSource.BUILTIN, + }, + ), + # test imports + ( + "some/test/module.py", + """ + from a.b import c as d + from . import rel + from .lol import rel2 + from .. import thing as rel3 + d, rel, rel2, rel3 + """, + { + "some.test.module": QualifiedNameSource.LOCAL, + "a.b.c": QualifiedNameSource.IMPORT, + "some.test.rel": QualifiedNameSource.IMPORT, + "some.test.lol.rel2": QualifiedNameSource.IMPORT, + "some.thing": QualifiedNameSource.IMPORT, + }, + ), + # test more imports + ( + "some/test/module/__init__.py", + """ + from . 
import rel + from .lol import rel2 + rel, rel2 + """, + { + "some.test.module": QualifiedNameSource.LOCAL, + "some.test.module.rel": QualifiedNameSource.IMPORT, + "some.test.module.lol.rel2": QualifiedNameSource.IMPORT, + }, + ), + # test locals + ( + "some/test/module.py", + """ + class X: + a: X + """, + { + "some.test.module": QualifiedNameSource.LOCAL, + "some.test.module.X": QualifiedNameSource.LOCAL, + "some.test.module.X.a": QualifiedNameSource.LOCAL, + }, + ), + ) + ) + def test_qnames( + self, file: str, code: str, names: Dict[str, QualifiedNameSource] + ) -> None: + qnames = get_fully_qualified_names(file, code) + self.assertSetEqual( + set(names.keys()), + {qname.name for qname in qnames}, + ) + for qname in qnames: + self.assertEqual(qname.source, names[qname.name], msg=f"{qname}") + + def test_local_qualification(self) -> None: + module_name = "some.test.module" + package_name = "some.test" + for name, expected in [ + (".foo", "some.test.foo"), + ("..bar", "some.bar"), + ("foo", "some.test.module.foo"), + ]: + with self.subTest(name=name): + self.assertEqual( + FullyQualifiedNameVisitor._fully_qualify_local( + module_name, package_name, name + ), + expected, + ) + + +class FullyQualifiedNameIntegrationTest(UnitTest): + def test_with_full_repo_manager(self) -> None: + with TemporaryDirectory() as dir: + root = Path(dir) + file_path = root / "pkg/mod.py" + file_path.parent.mkdir() + file_path.touch() + + file_path_str = file_path.as_posix() + mgr = FullRepoManager(root, [file_path_str], [FullyQualifiedNameProvider]) + wrapper = mgr.get_metadata_wrapper_for_path(file_path_str) + fqnames = wrapper.resolve(FullyQualifiedNameProvider) + (mod, names) = next(iter(fqnames.items())) + self.assertIsInstance(mod, cst.Module) + self.assertEqual( + names, {QualifiedName(name="pkg.mod", source=QualifiedNameSource.LOCAL)} + ) diff --git a/libcst/metadata/tests/test_parent_node_provider.py b/libcst/metadata/tests/test_parent_node_provider.py index f5f617f6..f04c1051 
100644 --- a/libcst/metadata/tests/test_parent_node_provider.py +++ b/libcst/metadata/tests/test_parent_node_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,7 @@ from textwrap import dedent import libcst as cst from libcst.metadata import MetadataWrapper, ParentNodeProvider -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class DependentVisitor(cst.CSTVisitor): diff --git a/libcst/metadata/tests/test_position_provider.py b/libcst/metadata/tests/test_position_provider.py index 03132428..14cecec7 100644 --- a/libcst/metadata/tests/test_position_provider.py +++ b/libcst/metadata/tests/test_position_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -83,6 +83,53 @@ class PositionProviderTest(UnitTest): wrapper = MetadataWrapper(parse_module("pass")) wrapper.visit_batched([ABatchable()]) + def test_match_statement_position_metadata(self) -> None: + test = self + + class MatchPositionVisitor(CSTVisitor): + METADATA_DEPENDENCIES = (PositionProvider,) + + def visit_Match(self, node: cst.Match) -> None: + test.assertEqual( + self.get_metadata(PositionProvider, node), + CodeRange((2, 0), (5, 16)), + ) + + def visit_MatchCase(self, node: cst.MatchCase) -> None: + if ( + isinstance(node.pattern, cst.MatchAs) + and node.pattern.name + and node.pattern.name.value == "b" + ): + test.assertEqual( + self.get_metadata(PositionProvider, node), + CodeRange((3, 4), (3, 16)), + ) + elif ( + isinstance(node.pattern, cst.MatchAs) + and node.pattern.name + and node.pattern.name.value == "c" + ): + test.assertEqual( + self.get_metadata(PositionProvider, node), + CodeRange((4, 4), (4, 16)), + ) + elif isinstance(node.pattern, cst.MatchAs) and not node.pattern.name: + test.assertEqual( + self.get_metadata(PositionProvider, node), + CodeRange((5, 4), (5, 16)), + ) + + code = """ +match status: + case b: pass + case c: pass + case _: pass +""" + + wrapper = MetadataWrapper(parse_module(code)) + wrapper.visit(MatchPositionVisitor()) + class PositionProvidingCodegenStateTest(UnitTest): def test_codegen_initial_position(self) -> None: diff --git a/libcst/metadata/tests/test_qualified_name_provider.py b/libcst/metadata/tests/test_qualified_name_provider.py deleted file mode 100644 index cf7fa68b..00000000 --- a/libcst/metadata/tests/test_qualified_name_provider.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -from textwrap import dedent -from typing import Collection, Mapping, Optional, Tuple - -import libcst as cst -from libcst import ensure_type -from libcst.metadata import ( - MetadataWrapper, - QualifiedName, - QualifiedNameProvider, - QualifiedNameSource, -) -from libcst.testing.utils import UnitTest - - -def get_qualified_name_metadata_provider( - module_str: str, -) -> Tuple[cst.Module, Mapping[cst.CSTNode, Collection[QualifiedName]]]: - wrapper = MetadataWrapper(cst.parse_module(dedent(module_str))) - return wrapper.module, wrapper.resolve(QualifiedNameProvider) - - -class ScopeProviderTest(UnitTest): - def test_simple_qualified_names(self) -> None: - m, names = get_qualified_name_metadata_provider( - """ - from a.b import c - class Cls: - def f(self) -> "c": - c() - d = {} - d['key'] = 0 - def g(): - pass - g() - """ - ) - cls = ensure_type(m.body[1], cst.ClassDef) - f = ensure_type(cls.body.body[0], cst.FunctionDef) - self.assertEqual( - names[ensure_type(f.returns, cst.Annotation).annotation], set() - ) - - c_call = ensure_type( - ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], cst.Expr - ).value - self.assertEqual( - names[c_call], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)} - ) - self.assertEqual( - names[c_call], {QualifiedName("a.b.c", QualifiedNameSource.IMPORT)} - ) - - g_call = ensure_type( - ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Expr - ).value - self.assertEqual(names[g_call], {QualifiedName("g", QualifiedNameSource.LOCAL)}) - d_name = ( - ensure_type( - ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], cst.Assign - ) - .targets[0] - .target - ) - self.assertEqual( - names[d_name], - {QualifiedName("Cls.f..d", QualifiedNameSource.LOCAL)}, - ) - d_subscript = ( - ensure_type( - ensure_type(f.body.body[2], cst.SimpleStatementLine).body[0], cst.Assign - ) - .targets[0] - .target - ) - self.assertEqual( - names[d_subscript], - {QualifiedName("Cls.f..d", QualifiedNameSource.LOCAL)}, - ) - - def 
test_nested_qualified_names(self) -> None: - m, names = get_qualified_name_metadata_provider( - """ - class A: - def f1(self): - def f2(): - pass - f2() - - def f3(self): - class B(): - ... - B() - def f4(): - def f5(): - class C: - pass - C() - f5() - """ - ) - - cls_a = ensure_type(m.body[0], cst.ClassDef) - self.assertEqual(names[cls_a], {QualifiedName("A", QualifiedNameSource.LOCAL)}) - func_f1 = ensure_type(cls_a.body.body[0], cst.FunctionDef) - self.assertEqual( - names[func_f1], {QualifiedName("A.f1", QualifiedNameSource.LOCAL)} - ) - func_f2_call = ensure_type( - ensure_type(func_f1.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr - ).value - self.assertEqual( - names[func_f2_call], - {QualifiedName("A.f1..f2", QualifiedNameSource.LOCAL)}, - ) - func_f3 = ensure_type(cls_a.body.body[1], cst.FunctionDef) - self.assertEqual( - names[func_f3], {QualifiedName("A.f3", QualifiedNameSource.LOCAL)} - ) - call_b = ensure_type( - ensure_type(func_f3.body.body[1], cst.SimpleStatementLine).body[0], cst.Expr - ).value - self.assertEqual( - names[call_b], {QualifiedName("A.f3..B", QualifiedNameSource.LOCAL)} - ) - func_f4 = ensure_type(m.body[1], cst.FunctionDef) - self.assertEqual( - names[func_f4], {QualifiedName("f4", QualifiedNameSource.LOCAL)} - ) - func_f5 = ensure_type(func_f4.body.body[0], cst.FunctionDef) - self.assertEqual( - names[func_f5], {QualifiedName("f4..f5", QualifiedNameSource.LOCAL)} - ) - cls_c = func_f5.body.body[0] - self.assertEqual( - names[cls_c], - {QualifiedName("f4..f5..C", QualifiedNameSource.LOCAL)}, - ) - - def test_multiple_assignments(self) -> None: - m, names = get_qualified_name_metadata_provider( - """ - if 1: - from a import b as c - elif 2: - from d import e as c - c() - """ - ) - call = ensure_type( - ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr - ).value - self.assertEqual( - names[call], - { - QualifiedName(name="a.b", source=QualifiedNameSource.IMPORT), - QualifiedName(name="d.e", 
source=QualifiedNameSource.IMPORT), - }, - ) - - def test_comprehension(self) -> None: - m, names = get_qualified_name_metadata_provider( - """ - class C: - def fn(self) -> None: - [[k for k in i] for i in [j for j in range(10)]] - # Note: - # The qualified name of i is straightforward to be "C.fn...i". - # ListComp j is evaluated outside of the ListComp i. - # so j has qualified name "C.fn...j". - # ListComp k is evaluated inside ListComp i. - # so k has qualified name "C.fn....k". - """ - ) - cls_def = ensure_type(m.body[0], cst.ClassDef) - fn_def = ensure_type(cls_def.body.body[0], cst.FunctionDef) - outer_comp = ensure_type( - ensure_type( - ensure_type(fn_def.body.body[0], cst.SimpleStatementLine).body[0], - cst.Expr, - ).value, - cst.ListComp, - ) - i = outer_comp.for_in.target - self.assertEqual( - names[i], - { - QualifiedName( - name="C.fn...i", - source=QualifiedNameSource.LOCAL, - ) - }, - ) - inner_comp_j = ensure_type(outer_comp.for_in.iter, cst.ListComp) - j = inner_comp_j.for_in.target - self.assertEqual( - names[j], - { - QualifiedName( - name="C.fn...j", - source=QualifiedNameSource.LOCAL, - ) - }, - ) - inner_comp_k = ensure_type(outer_comp.elt, cst.ListComp) - k = inner_comp_k.for_in.target - self.assertEqual( - names[k], - { - QualifiedName( - name="C.fn....k", - source=QualifiedNameSource.LOCAL, - ) - }, - ) - - def test_has_name_helper(self) -> None: - class TestVisitor(cst.CSTVisitor): - METADATA_DEPENDENCIES = (QualifiedNameProvider,) - - def __init__(self, test: UnitTest) -> None: - self.test = test - - def visit_Call(self, node: cst.Call) -> Optional[bool]: - self.test.assertTrue( - QualifiedNameProvider.has_name(self, node, "a.b.c") - ) - self.test.assertFalse(QualifiedNameProvider.has_name(self, node, "a.b")) - self.test.assertTrue( - QualifiedNameProvider.has_name( - self, node, QualifiedName("a.b.c", QualifiedNameSource.IMPORT) - ) - ) - self.test.assertFalse( - QualifiedNameProvider.has_name( - self, node, QualifiedName("a.b.c", 
QualifiedNameSource.LOCAL) - ) - ) - - MetadataWrapper(cst.parse_module("import a;a.b.c()")).visit(TestVisitor(self)) - - def test_name_in_attribute(self) -> None: - m, names = get_qualified_name_metadata_provider( - """ - obj = object() - obj.eval - """ - ) - attr = ensure_type( - ensure_type( - ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr - ).value, - cst.Attribute, - ) - self.assertEqual( - names[attr], - {QualifiedName(name="obj.eval", source=QualifiedNameSource.LOCAL)}, - ) - eval = attr.attr - self.assertEqual(names[eval], set()) - - def test_repeated_values_in_qualified_name(self) -> None: - m, names = get_qualified_name_metadata_provider( - """ - import a - class Foo: - bar: a.aa.aaa - """ - ) - foo = ensure_type(m.body[1], cst.ClassDef) - bar = ensure_type( - ensure_type( - ensure_type(foo.body, cst.IndentedBlock).body[0], - cst.SimpleStatementLine, - ).body[0], - cst.AnnAssign, - ) - - annotation = ensure_type(bar.annotation, cst.Annotation) - attribute = ensure_type(annotation.annotation, cst.Attribute) - - self.assertEqual( - names[attribute], {QualifiedName("a.aa.aaa", QualifiedNameSource.IMPORT)} - ) diff --git a/libcst/metadata/tests/test_reentrant_codegen.py b/libcst/metadata/tests/test_reentrant_codegen.py index 8e6abeab..c76b2828 100644 --- a/libcst/metadata/tests/test_reentrant_codegen.py +++ b/libcst/metadata/tests/test_reentrant_codegen.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -9,7 +9,7 @@ from typing import Callable import libcst as cst from libcst.metadata import ExperimentalReentrantCodegenProvider, MetadataWrapper -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class ExperimentalReentrantCodegenProviderTest(UnitTest): diff --git a/libcst/metadata/tests/test_scope_provider.py b/libcst/metadata/tests/test_scope_provider.py index 36fd19e5..a367de39 100644 --- a/libcst/metadata/tests/test_scope_provider.py +++ b/libcst/metadata/tests/test_scope_provider.py @@ -1,29 +1,35 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. +import sys from textwrap import dedent -from typing import Mapping, Tuple, cast +from typing import cast, Mapping, Sequence, Tuple +from unittest import mock import libcst as cst from libcst import ensure_type from libcst.metadata import MetadataWrapper from libcst.metadata.scope_provider import ( + _gen_dotted_names, + AnnotationScope, Assignment, + BuiltinAssignment, + BuiltinScope, ClassScope, ComprehensionScope, FunctionScope, GlobalScope, + ImportAssignment, LocalScope, QualifiedName, QualifiedNameSource, Scope, ScopeProvider, - _gen_dotted_names, ) -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class DependentVisitor(cst.CSTVisitor): @@ -143,6 +149,11 @@ class ScopeProviderTest(UnitTest): self.assertEqual(len(scope_of_module[builtin]), 1) self.assertEqual(len(scope_of_module["something_not_a_builtin"]), 0) + scope_of_builtin = scope_of_module.parent + self.assertIsInstance(scope_of_builtin, BuiltinScope) + self.assertEqual(len(scope_of_builtin[builtin]), 1) + self.assertEqual(len(scope_of_builtin["something_not_a_builtin"]), 0) + func_body = ensure_type(m.body[0], cst.FunctionDef).body 
func_pass_statement = func_body.body[0] scope_of_func_statement = scopes[func_pass_statement] @@ -183,17 +194,30 @@ class ScopeProviderTest(UnitTest): len(scope_of_module[in_scope]), 1, f"{in_scope} should be in scope." ) - assignment = cast(Assignment, list(scope_of_module[in_scope])[0]) + assignment = cast(ImportAssignment, list(scope_of_module[in_scope])[0]) self.assertEqual( assignment.name, in_scope, - f"Assignment name {assignment.name} should equal to {in_scope}.", + f"ImportAssignment name {assignment.name} should equal to {in_scope}.", ) import_node = ensure_type(m.body[idx], cst.SimpleStatementLine).body[0] self.assertEqual( assignment.node, import_node, - f"The node of Assignment {assignment.node} should equal to {import_node}", + f"The node of ImportAssignment {assignment.node} should equal to {import_node}", + ) + self.assertTrue(isinstance(import_node, (cst.Import, cst.ImportFrom))) + + names = import_node.names + + self.assertFalse(isinstance(names, cst.ImportStar)) + + alias = names[0] + as_name = alias.asname.name if alias.asname else alias.name + self.assertEqual( + assignment.as_name, + as_name, + f"The alias name of ImportAssignment {assignment.as_name} should equal to {as_name}", ) def test_dotted_import_access(self) -> None: @@ -212,7 +236,7 @@ class ScopeProviderTest(UnitTest): self.assertTrue("a" in scope_of_module) self.assertEqual(scope_of_module.accesses["a"], set()) - a_b_c_assignment = cast(Assignment, list(scope_of_module["a.b.c"])[0]) + a_b_c_assignment = cast(ImportAssignment, list(scope_of_module["a.b.c"])[0]) a_b_c_access = list(a_b_c_assignment.references)[0] self.assertEqual(scope_of_module.accesses["a.b.c"], {a_b_c_access}) self.assertEqual(a_b_c_access.node, call.func) @@ -228,6 +252,45 @@ class ScopeProviderTest(UnitTest): self.assertEqual(list(scope_of_module["x.y"])[0].references, set()) self.assertEqual(scope_of_module.accesses["x.y"], set()) + def test_dotted_import_access_reference_by_node(self) -> None: + m, scopes = 
get_scope_metadata_provider( + """ + import a.b.c + a.b.c() + """ + ) + scope_of_module = scopes[m] + first_statement = ensure_type(m.body[1], cst.SimpleStatementLine) + call = ensure_type( + ensure_type(first_statement.body[0], cst.Expr).value, cst.Call + ) + + a_b_c_assignment = cast(ImportAssignment, list(scope_of_module["a.b.c"])[0]) + a_b_c_access = list(a_b_c_assignment.references)[0] + self.assertEqual(scope_of_module.accesses[call], {a_b_c_access}) + self.assertEqual(a_b_c_access.node, call.func) + + def test_decorator_access_reference_by_node(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + import decorator + + @decorator + def f(): + pass + """ + ) + scope_of_module = scopes[m] + function_def = ensure_type(m.body[1], cst.FunctionDef) + decorator = function_def.decorators[0] + self.assertTrue("decorator" in scope_of_module) + + decorator_assignment = cast( + ImportAssignment, list(scope_of_module["decorator"])[0] + ) + decorator_access = list(decorator_assignment.references)[0] + self.assertEqual(scope_of_module.accesses[decorator], {decorator_access}) + def test_dotted_import_with_call_access(self) -> None: m, scopes = get_scope_metadata_provider( """ @@ -252,7 +315,9 @@ class ScopeProviderTest(UnitTest): self.assertTrue("os.path" in scope_of_module) self.assertTrue("os" in scope_of_module) - os_path_join_assignment = cast(Assignment, list(scope_of_module["os.path"])[0]) + os_path_join_assignment = cast( + ImportAssignment, list(scope_of_module["os.path"])[0] + ) os_path_join_assignment_references = list(os_path_join_assignment.references) self.assertNotEqual(len(os_path_join_assignment_references), 0) os_path_join_access = os_path_join_assignment_references[0] @@ -280,21 +345,42 @@ class ScopeProviderTest(UnitTest): for alias in import_aliases: self.assertEqual(scopes[alias], scope_of_module) - for idx, in_scope in [(0, "a"), (0, "b_renamed"), (1, "c"), (2, "d")]: + for idx, in_scope, imported_object_idx in [ + (0, "a", 0), + (0, 
"b_renamed", 1), + (1, "c", 0), + (2, "d", 0), + ]: self.assertEqual( len(scope_of_module[in_scope]), 1, f"{in_scope} should be in scope." ) - import_assignment = cast(Assignment, list(scope_of_module[in_scope])[0]) + import_assignment = cast( + ImportAssignment, list(scope_of_module[in_scope])[0] + ) self.assertEqual( import_assignment.name, in_scope, - f"The name of Assignment {import_assignment.name} should equal to {in_scope}.", + f"The name of ImportAssignment {import_assignment.name} should equal to {in_scope}.", ) import_node = ensure_type(m.body[idx], cst.SimpleStatementLine).body[0] self.assertEqual( import_assignment.node, import_node, - f"The node of Assignment {import_assignment.node} should equal to {import_node}", + f"The node of ImportAssignment {import_assignment.node} should equal to {import_node}", + ) + + self.assertTrue(isinstance(import_node, (cst.Import, cst.ImportFrom))) + + names = import_node.names + + self.assertFalse(isinstance(names, cst.ImportStar)) + + alias = names[imported_object_idx] + as_name = alias.asname.name if alias.asname else alias.name + self.assertEqual( + import_assignment.as_name, + as_name, + f"The alias name of ImportAssignment {import_assignment.as_name} should equal to {as_name}", ) for not_in_scope in ["foo", "bar", "foo.bar", "b"]: @@ -469,31 +555,39 @@ class ScopeProviderTest(UnitTest): self.assertIs(scopes[inner_for_in.target], scope_of_list_comp) def test_global_scope_overwrites(self) -> None: - m, scopes = get_scope_metadata_provider( + codes = ( """ class Cls: def f(): global var var = ... 
+ """, """ + class Cls: + def f(): + global var + import f as var + """, ) - scope_of_module = scopes[m] - self.assertIsInstance(scope_of_module, GlobalScope) - self.assertTrue("var" in scope_of_module) + for code in codes: + m, scopes = get_scope_metadata_provider(code) + scope_of_module = scopes[m] + self.assertIsInstance(scope_of_module, GlobalScope) + self.assertTrue("var" in scope_of_module) - cls = ensure_type(m.body[0], cst.ClassDef) - scope_of_cls = scopes[cls.body.body[0]] - self.assertIsInstance(scope_of_cls, ClassScope) - self.assertTrue("var" in scope_of_cls) + cls = ensure_type(m.body[0], cst.ClassDef) + scope_of_cls = scopes[cls.body.body[0]] + self.assertIsInstance(scope_of_cls, ClassScope) + self.assertTrue("var" in scope_of_cls) - f = ensure_type(cls.body.body[0], cst.FunctionDef) - scope_of_f = scopes[f.body.body[0]] - self.assertIsInstance(scope_of_f, FunctionScope) - self.assertTrue("var" in scope_of_f) - self.assertEqual(scope_of_f["var"], scope_of_module["var"]) + f = ensure_type(cls.body.body[0], cst.FunctionDef) + scope_of_f = scopes[f.body.body[0]] + self.assertIsInstance(scope_of_f, FunctionScope) + self.assertTrue("var" in scope_of_f) + self.assertEqual(scope_of_f["var"], scope_of_module["var"]) def test_nonlocal_scope_overwrites(self) -> None: - m, scopes = get_scope_metadata_provider( + codes = ( """ def outer_f(): var = ... @@ -502,46 +596,74 @@ class ScopeProviderTest(UnitTest): def inner_f(): nonlocal var var = ... + """, """ + def outer_f(): + import f as var + class Cls: + var = ... + def inner_f(): + nonlocal var + var = ... + """, + """ + def outer_f(): + var = ... + class Cls: + var = ... 
+ def inner_f(): + nonlocal var + import f as var + """, ) - scope_of_module = scopes[m] - self.assertIsInstance(scope_of_module, GlobalScope) - self.assertTrue("var" not in scope_of_module) + for code in codes: + m, scopes = get_scope_metadata_provider(code) + scope_of_module = scopes[m] + self.assertIsInstance(scope_of_module, GlobalScope) + self.assertTrue("var" not in scope_of_module) - outer_f = ensure_type(m.body[0], cst.FunctionDef) - outer_f_body_var_assign = ensure_type( - ensure_type(outer_f.body.body[0], cst.SimpleStatementLine).body[0], - cst.Assign, - ) - scope_of_outer_f = scopes[outer_f_body_var_assign] - self.assertIsInstance(scope_of_outer_f, FunctionScope) - self.assertTrue("var" in scope_of_outer_f) - self.assertEqual(len(scope_of_outer_f["var"]), 2) + outer_f = ensure_type(m.body[0], cst.FunctionDef) + outer_f_body_var = ensure_type( + ensure_type(outer_f.body.body[0], cst.SimpleStatementLine).body[0], + cst.CSTNode, + ) + scope_of_outer_f = scopes[outer_f_body_var] + self.assertIsInstance(scope_of_outer_f, FunctionScope) + self.assertTrue("var" in scope_of_outer_f) + self.assertEqual(len(scope_of_outer_f["var"]), 2) - cls = ensure_type(outer_f.body.body[1], cst.ClassDef) - scope_of_cls = scopes[cls.body.body[0]] - self.assertIsInstance(scope_of_cls, ClassScope) - self.assertTrue("var" in scope_of_cls) + cls = ensure_type(outer_f.body.body[1], cst.ClassDef) + scope_of_cls = scopes[cls.body.body[0]] + self.assertIsInstance(scope_of_cls, ClassScope) + self.assertTrue("var" in scope_of_cls) - inner_f = ensure_type(cls.body.body[1], cst.FunctionDef) - inner_f_body_var_assign = ensure_type( - ensure_type(inner_f.body.body[1], cst.SimpleStatementLine).body[0], - cst.Assign, - ) - scope_of_inner_f = scopes[inner_f_body_var_assign] - self.assertIsInstance(scope_of_inner_f, FunctionScope) - self.assertTrue("var" in scope_of_inner_f) - self.assertEqual(len(scope_of_inner_f["var"]), 2) - self.assertEqual( - { - cast(Assignment, assignment).node - for 
assignment in scope_of_outer_f["var"] - }, - { - outer_f_body_var_assign.targets[0].target, - inner_f_body_var_assign.targets[0].target, - }, - ) + inner_f = ensure_type(cls.body.body[1], cst.FunctionDef) + inner_f_body_var = ensure_type( + ensure_type(inner_f.body.body[1], cst.SimpleStatementLine).body[0], + cst.CSTNode, + ) + scope_of_inner_f = scopes[inner_f_body_var] + self.assertIsInstance(scope_of_inner_f, FunctionScope) + self.assertTrue("var" in scope_of_inner_f) + self.assertEqual(len(scope_of_inner_f["var"]), 2) + self.assertEqual( + { + cast(Assignment, assignment).node + for assignment in scope_of_outer_f["var"] + }, + { + ( + outer_f_body_var.targets[0].target + if isinstance(outer_f_body_var, cst.Assign) + else outer_f_body_var + ), + ( + inner_f_body_var.targets[0].target + if isinstance(inner_f_body_var, cst.Assign) + else inner_f_body_var + ), + }, + ) def test_local_scope_shadowing_with_functions(self) -> None: m, scopes = get_scope_metadata_provider( @@ -866,6 +988,25 @@ class ScopeProviderTest(UnitTest): {QualifiedName("f4..f5..C", QualifiedNameSource.LOCAL)}, ) + def test_get_qualified_names_for_the_same_prefix(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + from a import b, bc + bc() + """ + ) + call = ensure_type( + ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Expr + ).value, + cst.Call, + ) + module_scope = scopes[m] + self.assertEqual( + module_scope.get_qualified_names_for(call.func), + {QualifiedName("a.bc", QualifiedNameSource.IMPORT)}, + ) + def test_get_qualified_names_for_dotted_imports(self) -> None: m, scopes = get_scope_metadata_provider( """ @@ -1038,7 +1179,7 @@ class ScopeProviderTest(UnitTest): m, scopes = get_scope_metadata_provider( """ from typing import Literal, NewType, Optional, TypeVar, Callable, cast - from a import A, B, C, D, D2, E, E2, F, G, G2, H, I, J, K, K2 + from a import A, B, C, D, D2, E, E2, F, G, G2, H, I, J, K, K2, L, M def x(a: A): pass def y(b: "B"): @@ 
-1054,7 +1195,8 @@ class ScopeProviderTest(UnitTest): class Test(Generic[J]): pass - casted = cast("K", "K2") + castedK = cast("K", "K2") + castedL = cast("L", M) """ ) imp = ensure_type( @@ -1073,6 +1215,10 @@ class ScopeProviderTest(UnitTest): self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertTrue(references[0].is_annotation) + reference_node = references[0].node + self.assertIsInstance(reference_node, cst.SimpleString) + if isinstance(reference_node, cst.SimpleString): + self.assertEqual(reference_node.evaluated_value, "B") assignment = list(scope["C"])[0] self.assertIsInstance(assignment, Assignment) @@ -1095,6 +1241,10 @@ class ScopeProviderTest(UnitTest): references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) + reference_node = references[0].node + self.assertIsInstance(reference_node, cst.SimpleString) + if isinstance(reference_node, cst.SimpleString): + self.assertEqual(reference_node.evaluated_value, "E") assignment = list(scope["E2"])[0] self.assertIsInstance(assignment, Assignment) @@ -1110,6 +1260,10 @@ class ScopeProviderTest(UnitTest): references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) + reference_node = references[0].node + self.assertIsInstance(reference_node, cst.SimpleString) + if isinstance(reference_node, cst.SimpleString): + self.assertEqual(reference_node.evaluated_value, "Optional[G]") assignment = list(scope["G2"])[0] self.assertIsInstance(assignment, Assignment) @@ -1121,6 +1275,10 @@ class ScopeProviderTest(UnitTest): references = list(assignment.references) self.assertFalse(references[0].is_annotation) self.assertTrue(references[0].is_type_hint) + reference_node = references[0].node + self.assertIsInstance(reference_node, cst.SimpleString) + if isinstance(reference_node, cst.SimpleString): + 
self.assertEqual(reference_node.evaluated_value, "H") assignment = list(scope["I"])[0] self.assertIsInstance(assignment, Assignment) @@ -1139,11 +1297,77 @@ class ScopeProviderTest(UnitTest): self.assertEqual(len(assignment.references), 1) references = list(assignment.references) self.assertFalse(references[0].is_annotation) + reference_node = references[0].node + self.assertIsInstance(reference_node, cst.SimpleString) + if isinstance(reference_node, cst.SimpleString): + self.assertEqual(reference_node.evaluated_value, "K") assignment = list(scope["K2"])[0] self.assertIsInstance(assignment, Assignment) self.assertEqual(len(assignment.references), 0) + assignment = list(scope["L"])[0] + self.assertIsInstance(assignment, Assignment) + self.assertEqual(len(assignment.references), 1) + references = list(assignment.references) + reference_node = references[0].node + self.assertIsInstance(reference_node, cst.SimpleString) + if isinstance(reference_node, cst.SimpleString): + self.assertEqual(reference_node.evaluated_value, "L") + + assignment = list(scope["M"])[0] + self.assertIsInstance(assignment, Assignment) + self.assertEqual(len(assignment.references), 1) + references = list(assignment.references) + + def test_insane_annotation_access(self) -> None: + m, scopes = get_scope_metadata_provider( + r""" + from typing import TypeVar, Optional + from a import G + TypeVar("G2", bound="Optional[\"G\"]") + """ + ) + imp = ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.ImportFrom + ) + call = ensure_type( + ensure_type( + ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr + ).value, + cst.Call, + ) + bound = call.args[1].value + scope = scopes[imp] + assignment = next(iter(scope["G"])) + self.assertIsInstance(assignment, Assignment) + self.assertEqual(len(assignment.references), 1) + self.assertEqual(list(assignment.references)[0].node, bound) + + def test_dotted_annotation_access(self) -> None: + m, scopes = 
get_scope_metadata_provider( + r""" + from typing import TypeVar + import a.G + TypeVar("G2", bound="a.G") + """ + ) + imp = ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Import + ) + call = ensure_type( + ensure_type( + ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.Expr + ).value, + cst.Call, + ) + bound = call.args[1].value + scope = scopes[imp] + assignment = next(iter(scope["a.G"])) + self.assertIsInstance(assignment, Assignment) + self.assertEqual(len(assignment.references), 1) + self.assertEqual(list(assignment.references)[0].node, bound) + def test_node_of_scopes(self) -> None: m, scopes = get_scope_metadata_provider( """ @@ -1265,35 +1489,33 @@ class ScopeProviderTest(UnitTest): def test_global_contains_is_read_only(self) -> None: gscope = GlobalScope() - before_assignments = list(gscope._assignments.items()) - before_accesses = list(gscope._accesses.items()) + before_assignments = list(gscope.assignments) + before_accesses = list(gscope.accesses) self.assertFalse("doesnt_exist" in gscope) - self.assertEqual(list(gscope._accesses.items()), before_accesses) - self.assertEqual(list(gscope._assignments.items()), before_assignments) + self.assertEqual(list(gscope.accesses), before_accesses) + self.assertEqual(list(gscope.assignments), before_assignments) def test_contains_is_read_only(self) -> None: for s in [LocalScope, FunctionScope, ClassScope, ComprehensionScope]: with self.subTest(scope=s): gscope = GlobalScope() scope = s(parent=gscope, node=cst.Name("lol")) - before_assignments = list(scope._assignments.items()) - before_accesses = list(scope._accesses.items()) + before_assignments = list(scope.assignments) + before_accesses = list(scope.accesses) before_overwrites = list(scope._scope_overwrites.items()) - before_parent_assignments = list(scope.parent._assignments.items()) - before_parent_accesses = list(scope.parent._accesses.items()) + before_parent_assignments = list(scope.parent.assignments) + 
before_parent_accesses = list(scope.parent.accesses) self.assertFalse("doesnt_exist" in scope) - self.assertEqual(list(scope._accesses.items()), before_accesses) - self.assertEqual(list(scope._assignments.items()), before_assignments) + self.assertEqual(list(scope.accesses), before_accesses) + self.assertEqual(list(scope.assignments), before_assignments) self.assertEqual( list(scope._scope_overwrites.items()), before_overwrites ) self.assertEqual( - list(scope.parent._assignments.items()), before_parent_assignments - ) - self.assertEqual( - list(scope.parent._accesses.items()), before_parent_accesses + list(scope.parent.assignments), before_parent_assignments ) + self.assertEqual(list(scope.parent.accesses), before_parent_accesses) def test_attribute_of_function_call(self) -> None: get_scope_metadata_provider("foo().bar") @@ -1316,11 +1538,11 @@ class ScopeProviderTest(UnitTest): ) a = m.body[0] scope = scopes[a] - assignments_len_before = len(scope._assignments) - accesses_len_before = len(scope._accesses) + assignments_before = list(scope.assignments) + accesses_before = list(scope.accesses) scope.get_qualified_names_for("doesnt_exist") - self.assertEqual(len(scope._assignments), assignments_len_before) - self.assertEqual(len(scope._accesses), accesses_len_before) + self.assertEqual(list(scope.assignments), assignments_before) + self.assertEqual(list(scope.accesses), accesses_before) def test_gen_dotted_names(self) -> None: names = {name for name, node in _gen_dotted_names(cst.Name(value="a"))} @@ -1370,19 +1592,20 @@ class ScopeProviderTest(UnitTest): first_assignment = list(global_scope.assignments)[0] assert isinstance(first_assignment, cst.metadata.Assignment) self.assertEqual(first_assignment.node, import_stmt) - global_refs = list(first_assignment.references) + global_refs = first_assignment.references self.assertEqual(len(global_refs), 2) + global_refs_nodes = {x.node for x in global_refs} class_def = ensure_type(m.body[1], cst.ClassDef) x = ensure_type( 
ensure_type(class_def.body.body[0], cst.SimpleStatementLine).body[0], cst.Assign, ) - self.assertEqual(x.value, global_refs[0].node) + self.assertIn(x.value, global_refs_nodes) class_b = ensure_type( ensure_type(class_def.body.body[1], cst.SimpleStatementLine).body[0], cst.Assign, ) - self.assertEqual(class_b.value, global_refs[1].node) + self.assertIn(class_b.value, global_refs_nodes) class_accesses = list(scopes[x].accesses) self.assertEqual(len(class_accesses), 3) @@ -1454,3 +1677,563 @@ class ScopeProviderTest(UnitTest): b_global_refs = list(b_global_assignment.references) self.assertEqual(len(b_global_refs), 1) self.assertEqual(b_global_refs[0].node, second_print.args[0].value) + + def test_ordering_comprehension(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + def f(a): + [a for a in [] for b in a] + [b for a in [] for b in a] + [a for a in [] for a in []] + a = 1 + """ + ) + f = cst.ensure_type(m.body[0], cst.FunctionDef) + a_param = f.params.params[0].name + a_param_assignment = list(scopes[a_param]["a"])[0] + a_param_refs = list(a_param_assignment.references) + self.assertEqual(a_param_refs, []) + first_comp = cst.ensure_type( + cst.ensure_type( + cst.ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], + cst.Expr, + ).value, + cst.ListComp, + ) + a_comp_assignment = list(scopes[first_comp.elt]["a"])[0] + self.assertEqual(len(a_comp_assignment.references), 2) + self.assertIn( + first_comp.elt, [ref.node for ref in a_comp_assignment.references] + ) + + second_comp = cst.ensure_type( + cst.ensure_type( + cst.ensure_type(f.body.body[1], cst.SimpleStatementLine).body[0], + cst.Expr, + ).value, + cst.ListComp, + ) + b_comp_assignment = list(scopes[second_comp.elt]["b"])[0] + self.assertEqual(len(b_comp_assignment.references), 1) + a_second_comp_assignment = list(scopes[second_comp.elt]["a"])[0] + self.assertEqual(len(a_second_comp_assignment.references), 1) + + third_comp = cst.ensure_type( + cst.ensure_type( + 
cst.ensure_type(f.body.body[2], cst.SimpleStatementLine).body[0], + cst.Expr, + ).value, + cst.ListComp, + ) + a_third_comp_assignments = list(scopes[third_comp.elt]["a"]) + self.assertEqual(len(a_third_comp_assignments), 2) + a_third_comp_access = list(scopes[third_comp.elt].accesses)[0] + self.assertEqual(a_third_comp_access.node, third_comp.elt) + # We record both assignments because it's impossible to know which one + # the access refers to without running the program + self.assertEqual(len(a_third_comp_access.referents), 2) + inner_for_in = third_comp.for_in.inner_for_in + self.assertIsNotNone(inner_for_in) + if inner_for_in: + self.assertIn( + inner_for_in.target, + { + ref.node + for ref in a_third_comp_access.referents + if isinstance(ref, Assignment) + }, + ) + + a_global = ( + cst.ensure_type( + cst.ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.Assign + ) + .targets[0] + .target + ) + a_global_assignment = list(scopes[a_global]["a"])[0] + a_global_refs = list(a_global_assignment.references) + self.assertEqual(a_global_refs, []) + + def test_ordering_comprehension_confusing(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + def f(a): + [a for a in a] + a = 1 + """ + ) + f = cst.ensure_type(m.body[0], cst.FunctionDef) + a_param = f.params.params[0].name + a_param_assignment = list(scopes[a_param]["a"])[0] + a_param_refs = list(a_param_assignment.references) + self.assertEqual(len(a_param_refs), 1) + comp = cst.ensure_type( + cst.ensure_type( + cst.ensure_type(f.body.body[0], cst.SimpleStatementLine).body[0], + cst.Expr, + ).value, + cst.ListComp, + ) + a_comp_assignment = list(scopes[comp.elt]["a"])[0] + self.assertEqual(list(a_param_refs)[0].node, comp.for_in.iter) + self.assertEqual(len(a_comp_assignment.references), 1) + self.assertEqual(list(a_comp_assignment.references)[0].node, comp.elt) + + def test_for_scope_ordering(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + def f(): + for x in []: + x + class X: 
+ def f(): + for x in []: + x + """ + ) + for scope in scopes.values(): + for acc in scope.accesses: + self.assertEqual( + len(acc.referents), + 1, + msg=( + "Access for node has incorrect number of referents: " + + f"{acc.node}" + ), + ) + + def test_no_out_of_order_references_in_global_scope(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + x = y + y = 1 + """ + ) + for scope in scopes.values(): + for acc in scope.accesses: + self.assertEqual( + len(acc.referents), + 0, + msg=( + "Access for node has incorrect number of referents: " + + f"{acc.node}" + ), + ) + + def test_walrus_accesses(self) -> None: + if sys.version_info < (3, 8): + self.skipTest("This python version doesn't support :=") + m, scopes = get_scope_metadata_provider( + """ + if x := y: + y = 1 + x + """ + ) + for scope in scopes.values(): + for acc in scope.accesses: + self.assertEqual( + len(acc.referents), + 1 if getattr(acc.node, "value", None) == "x" else 0, + msg=( + "Access for node has incorrect number of referents: " + + f"{acc.node}" + ), + ) + + @data_provider( + { + "TypeVar": { + "code": """ + from typing import TypeVar + TypeVar("Name", "int") + """, + "calls": [mock.call("int")], + }, + "Dict": { + "code": """ + from typing import Dict + Dict["str", "int"] + """, + "calls": [mock.call("str"), mock.call("int")], + }, + "cast_no_annotation": { + "code": """ + from typing import Dict, cast + cast(Dict[str, str], {})["3rr0r"] + """, + "calls": [], + }, + "cast_second_arg": { + "code": """ + from typing import cast + cast(str, "foo") + """, + "calls": [], + }, + "cast_first_arg": { + "code": """ + from typing import cast + cast("int", "foo") + """, + "calls": [ + mock.call("int"), + ], + }, + "typevar_func": { + "code": """ + from typing import TypeVar + TypeVar("Name", func("int")) + """, + "calls": [], + }, + "literal": { + "code": """ + from typing import Literal + Literal[\"G\"] + """, + "calls": [], + }, + "nested_str": { + "code": r""" + from typing import TypeVar, 
Optional + from a import G + TypeVar("G2", bound="Optional[\"G\"]") + """, + "calls": [mock.call('Optional["G"]'), mock.call("G")], + }, + "class_self_ref": { + "code": """ + from typing import TypeVar + class HelperClass: + value: TypeVar("THelperClass", bound="HelperClass") + """, + "calls": [mock.call("HelperClass")], + }, + } + ) + def test_parse_string_annotations( + self, *, code: str, calls: Sequence[mock._Call] + ) -> None: + parse = cst.parse_module + with mock.patch("libcst.parse_module") as parse_mock: + parse_mock.side_effect = parse + get_scope_metadata_provider(dedent(code)) + calls = [mock.call(dedent(code))] + list(calls) + self.assertEqual(parse_mock.call_count, len(calls)) + parse_mock.assert_has_calls(calls) + + def test_builtin_scope(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + a = pow(1, 2) + def foo(): + b = pow(2, 3) + """ + ) + scope_of_module = scopes[m] + self.assertIsInstance(scope_of_module, GlobalScope) + self.assertEqual(len(scope_of_module["pow"]), 1) + builtin_pow_assignment = list(scope_of_module["pow"])[0] + self.assertIsInstance(builtin_pow_assignment, BuiltinAssignment) + self.assertIsInstance(builtin_pow_assignment.scope, BuiltinScope) + + global_a_assignments = scope_of_module["a"] + self.assertEqual(len(global_a_assignments), 1) + a_assignment = list(global_a_assignments)[0] + self.assertIsInstance(a_assignment, Assignment) + + func_body = ensure_type(m.body[1], cst.FunctionDef).body + func_statement = func_body.body[0] + scope_of_func_statement = scopes[func_statement] + self.assertIsInstance(scope_of_func_statement, FunctionScope) + func_b_assignments = scope_of_func_statement["b"] + self.assertEqual(len(func_b_assignments), 1) + b_assignment = list(func_b_assignments)[0] + self.assertIsInstance(b_assignment, Assignment) + + builtin_pow_accesses = list(builtin_pow_assignment.references) + self.assertEqual(len(builtin_pow_accesses), 2) + + def test_override_builtin_scope(self) -> None: + m, scopes = 
get_scope_metadata_provider( + """ + def pow(x, y): + return x ** y + + a = pow(1, 2) + def foo(): + b = pow(2, 3) + """ + ) + scope_of_module = scopes[m] + self.assertIsInstance(scope_of_module, GlobalScope) + self.assertEqual(len(scope_of_module["pow"]), 1) + global_pow_assignment = list(scope_of_module["pow"])[0] + self.assertIsInstance(global_pow_assignment, Assignment) + self.assertIsInstance(global_pow_assignment.scope, GlobalScope) + + global_a_assignments = scope_of_module["a"] + self.assertEqual(len(global_a_assignments), 1) + a_assignment = list(global_a_assignments)[0] + self.assertIsInstance(a_assignment, Assignment) + + func_body = ensure_type(m.body[2], cst.FunctionDef).body + func_statement = func_body.body[0] + scope_of_func_statement = scopes[func_statement] + self.assertIsInstance(scope_of_func_statement, FunctionScope) + func_b_assignments = scope_of_func_statement["b"] + self.assertEqual(len(func_b_assignments), 1) + b_assignment = list(func_b_assignments)[0] + self.assertIsInstance(b_assignment, Assignment) + + global_pow_accesses = list(global_pow_assignment.references) + self.assertEqual(len(global_pow_accesses), 2) + + def test_annotation_access_in_typevar_bound(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + from typing import TypeVar + class Test: + var: TypeVar("T", bound="Test") + """ + ) + imp = ensure_type( + ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.ImportFrom + ) + scope = scopes[imp] + assignment = list(scope["Test"])[0] + self.assertIsInstance(assignment, Assignment) + self.assertEqual(len(assignment.references), 1) + references = list(assignment.references) + self.assertTrue(references[0].is_annotation) + + def test_prefix_match(self) -> None: + """Verify that a name doesn't overmatch on prefix""" + m, scopes = get_scope_metadata_provider( + """ + def something(): + ... 
+ """ + ) + scope = scopes[m] + self.assertEqual( + scope.get_qualified_names_for(cst.Name("something")), + {QualifiedName(name="something", source=QualifiedNameSource.LOCAL)}, + ) + self.assertEqual( + scope.get_qualified_names_for(cst.Name("something_else")), + set(), + ) + + def test_type_alias_scope(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + type A = C + lol: A + """ + ) + alias = ensure_type( + ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.TypeAlias + ) + self.assertIsInstance(scopes[alias], GlobalScope) + a_assignments = list(scopes[alias]["A"]) + self.assertEqual(len(a_assignments), 1) + lol = ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.AnnAssign + ) + self.assertEqual(len(a_references := list(a_assignments[0].references)), 1) + self.assertEqual(a_references[0].node, lol.annotation.annotation) + + self.assertIsInstance(scopes[alias.value], AnnotationScope) + + def test_type_alias_param(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + B = int + type A[T: B] = T + lol: T + """ + ) + alias = ensure_type( + ensure_type(m.body[1], cst.SimpleStatementLine).body[0], cst.TypeAlias + ) + assert alias.type_parameters + param_scope = scopes[alias.type_parameters] + self.assertEqual(len(t_assignments := list(param_scope["T"])), 1) + self.assertEqual(len(t_refs := list(t_assignments[0].references)), 1) + self.assertEqual(t_refs[0].node, alias.value) + + b = ( + ensure_type( + ensure_type(m.body[0], cst.SimpleStatementLine).body[0], cst.Assign + ) + .targets[0] + .target + ) + b_assignment = list(scopes[b]["B"])[0] + self.assertEqual( + {ref.node for ref in b_assignment.references}, + {ensure_type(alias.type_parameters.params[0].param, cst.TypeVar).bound}, + ) + + def test_type_alias_tuple_and_paramspec(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + type A[*T] = T + lol: T + type A[**T] = T + lol: T + """ + ) + alias_tuple = ensure_type( + ensure_type(m.body[0], 
cst.SimpleStatementLine).body[0], cst.TypeAlias + ) + assert alias_tuple.type_parameters + param_scope = scopes[alias_tuple.type_parameters] + self.assertEqual(len(t_assignments := list(param_scope["T"])), 1) + self.assertEqual(len(t_refs := list(t_assignments[0].references)), 1) + self.assertEqual(t_refs[0].node, alias_tuple.value) + + alias_paramspec = ensure_type( + ensure_type(m.body[2], cst.SimpleStatementLine).body[0], cst.TypeAlias + ) + assert alias_paramspec.type_parameters + param_scope = scopes[alias_paramspec.type_parameters] + self.assertEqual(len(t_assignments := list(param_scope["T"])), 1) + self.assertEqual(len(t_refs := list(t_assignments[0].references)), 1) + self.assertEqual(t_refs[0].node, alias_paramspec.value) + + def test_class_type_params(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + class W[T]: + def f() -> T: pass + def g[T]() -> T: pass + """ + ) + cls = ensure_type(m.body[0], cst.ClassDef) + cls_scope = scopes[cls.body.body[0]] + self.assertEqual(len(t_assignments_in_cls := list(cls_scope["T"])), 1) + assert cls.type_parameters + self.assertEqual( + ensure_type(t_assignments_in_cls[0], Assignment).node, + cls.type_parameters.params[0].param, + ) + self.assertEqual( + len(t_refs_in_cls := list(t_assignments_in_cls[0].references)), 1 + ) + f = ensure_type(cls.body.body[0], cst.FunctionDef) + assert f.returns + self.assertEqual(t_refs_in_cls[0].node, f.returns.annotation) + + g = ensure_type(cls.body.body[1], cst.FunctionDef) + assert g.type_parameters + assert g.returns + self.assertEqual(len(t_assignments_in_g := list(scopes[g.body]["T"])), 1) + self.assertEqual( + ensure_type(t_assignments_in_g[0], Assignment).node, + g.type_parameters.params[0].param, + ) + self.assertEqual(len(t_refs_in_g := list(t_assignments_in_g[0].references)), 1) + self.assertEqual(t_refs_in_g[0].node, g.returns.annotation) + + def test_nested_class_type_params(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + class Outer: + 
class Nested[T: Outer]: pass + """ + ) + outer = ensure_type(m.body[0], cst.ClassDef) + outer_refs = list(list(scopes[outer]["Outer"])[0].references) + self.assertEqual(len(outer_refs), 1) + inner = ensure_type(outer.body.body[0], cst.ClassDef) + assert inner.type_parameters + self.assertEqual( + outer_refs[0].node, + ensure_type(inner.type_parameters.params[0].param, cst.TypeVar).bound, + ) + + def test_annotation_refers_to_nested_class(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + class Outer: + class Nested: + pass + + type Alias = Nested + + def meth1[T: Nested](self): pass + def meth2[T](self, arg: Nested): pass + """ + ) + outer = ensure_type(m.body[0], cst.ClassDef) + nested = ensure_type(outer.body.body[0], cst.ClassDef) + alias = ensure_type( + ensure_type(outer.body.body[1], cst.SimpleStatementLine).body[0], + cst.TypeAlias, + ) + self.assertIsInstance(scopes[alias.value], AnnotationScope) + nested_refs_within_alias = list(scopes[alias.value].accesses["Nested"]) + self.assertEqual(len(nested_refs_within_alias), 1) + self.assertEqual( + { + ensure_type(ref, Assignment).node + for ref in nested_refs_within_alias[0].referents + }, + {nested}, + ) + + meth1 = ensure_type(outer.body.body[2], cst.FunctionDef) + self.assertIsInstance(scopes[meth1], ClassScope) + assert meth1.type_parameters + meth1_typevar = ensure_type(meth1.type_parameters.params[0].param, cst.TypeVar) + meth1_typevar_scope = scopes[meth1_typevar] + self.assertIsInstance(meth1_typevar_scope, AnnotationScope) + nested_refs_within_meth1 = list(meth1_typevar_scope.accesses["Nested"]) + self.assertEqual(len(nested_refs_within_meth1), 1) + self.assertEqual( + { + ensure_type(ref, Assignment).node + for ref in nested_refs_within_meth1[0].referents + }, + {nested}, + ) + + meth2 = ensure_type(outer.body.body[3], cst.FunctionDef) + meth2_annotation = meth2.params.params[1].annotation + assert meth2_annotation + nested_refs_within_meth2 = 
list(scopes[meth2_annotation].accesses["Nested"]) + self.assertEqual(len(nested_refs_within_meth2), 1) + self.assertEqual( + { + ensure_type(ref, Assignment).node + for ref in nested_refs_within_meth2[0].referents + }, + {nested}, + ) + + def test_body_isnt_subject_to_special_annotation_rule(self) -> None: + m, scopes = get_scope_metadata_provider( + """ + class Outer: + class Inner: pass + def f[T: Inner](self): Inner + """ + ) + outer = ensure_type(m.body[0], cst.ClassDef) + # note: this is different from global scope + outer_scope = scopes[outer.body.body[0]] + inner_assignment = list(outer_scope["Inner"])[0] + self.assertEqual(len(inner_assignment.references), 1) + f = ensure_type(outer.body.body[1], cst.FunctionDef) + assert f.type_parameters + T = ensure_type(f.type_parameters.params[0].param, cst.TypeVar) + self.assertIs(list(inner_assignment.references)[0].node, T.bound) + + inner_in_func_body = ensure_type(f.body.body[0], cst.Expr) + f_scope = scopes[inner_in_func_body] + self.assertIn(inner_in_func_body.value, f_scope.accesses) + self.assertEqual(list(f_scope.accesses)[0].referents, set()) diff --git a/libcst/metadata/tests/test_span_provider.py b/libcst/metadata/tests/test_span_provider.py index 600b8820..01aaef37 100644 --- a/libcst/metadata/tests/test_span_provider.py +++ b/libcst/metadata/tests/test_span_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -6,10 +6,10 @@ import libcst as cst from libcst.metadata.span_provider import ( + byte_length_in_utf8, ByteSpanPositionProvider, CodeSpan, SpanProvidingCodegenState, - byte_length_in_utf8, ) from libcst.testing.utils import UnitTest diff --git a/libcst/metadata/tests/test_type_inference_provider.py b/libcst/metadata/tests/test_type_inference_provider.py index f6c97751..a0a70a8c 100644 --- a/libcst/metadata/tests/test_type_inference_provider.py +++ b/libcst/metadata/tests/test_type_inference_provider.py @@ -1,16 +1,21 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json +import os +import subprocess +import sys from pathlib import Path +from typing import cast, Mapping, Optional +from unittest import skipIf import libcst as cst from libcst import MetadataWrapper from libcst.metadata.type_inference_provider import PyreData, TypeInferenceProvider -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest from libcst.tests.test_pyre_integration import TEST_SUITE_PATH @@ -35,24 +40,50 @@ def _test_simple_class_helper(test: UnitTest, wrapper: MetadataWrapper) -> None: test.assertEqual(types[value], "int") # self - test.assertEqual( - types[self_number_attr.value], "libcst.tests.pyre.simple_class.Item" - ) + test.assertEqual(types[self_number_attr.value], "simple_class.Item") collector_assign = cst.ensure_type( cst.ensure_type(m.body[3], cst.SimpleStatementLine).body[0], cst.Assign ) collector = collector_assign.targets[0].target - test.assertEqual(types[collector], "libcst.tests.pyre.simple_class.ItemCollector") + test.assertEqual(types[collector], "simple_class.ItemCollector") items_assign = cst.ensure_type( cst.ensure_type(m.body[4], cst.SimpleStatementLine).body[0], cst.AnnAssign ) items = items_assign.target - 
test.assertEqual( - types[items], "typing.Sequence[libcst.tests.pyre.simple_class.Item]" - ) + test.assertEqual(types[items], "typing.Sequence[simple_class.Item]") +@skipIf( + sys.version_info < (3, 7), "TypeInferenceProvider doesn't support 3.6 and below" +) +@skipIf(sys.platform == "win32", "TypeInferenceProvider doesn't support windows") class TypeInferenceProviderTest(UnitTest): + maxDiff: Optional[int] = None + + @classmethod + def setUpClass(cls) -> None: + os.chdir(TEST_SUITE_PATH) + subprocess.run(["pyre", "-n", "start", "--no-watchman"]) + + @classmethod + def tearDownClass(cls) -> None: + subprocess.run(["pyre", "-n", "stop"], cwd=TEST_SUITE_PATH) + + @data_provider( + ((TEST_SUITE_PATH / "simple_class.py", TEST_SUITE_PATH / "simple_class.json"),) + ) + def test_gen_cache(self, source_path: Path, data_path: Path) -> None: + cache = TypeInferenceProvider.gen_cache( + root_path=source_path.parent, paths=[source_path.name], timeout=None + ) + result = cast(Mapping[str, object], cache[source_path.name]) + data: PyreData = json.loads(data_path.read_text()) + self.assertDictEqual( + data, + result, + "Pyre query result mismatch, try running `scripts/regenerate-fixtures.py`?", + ) + @data_provider( ((TEST_SUITE_PATH / "simple_class.py", TEST_SUITE_PATH / "simple_class.json"),) ) @@ -60,9 +91,13 @@ class TypeInferenceProviderTest(UnitTest): data: PyreData = json.loads(data_path.read_text()) wrapper = MetadataWrapper( cst.parse_module(source_path.read_text()), - # pyre-fixme[6]: Expected `Mapping[Type[BaseMetadataProvider[object]], - # Any]` for 2nd param but got `Dict[Type[TypeInferenceProvider], - # Sequence[InferredType]]`. 
cache={TypeInferenceProvider: data}, ) _test_simple_class_helper(self, wrapper) + + def test_with_empty_cache(self) -> None: + tip = TypeInferenceProvider({}) + self.assertEqual(tip.lookup, {}) + + tip = TypeInferenceProvider(PyreData()) + self.assertEqual(tip.lookup, {}) diff --git a/libcst/metadata/type_inference_provider.py b/libcst/metadata/type_inference_provider.py index 7cb7da28..8a90c26b 100644 --- a/libcst/metadata/type_inference_provider.py +++ b/libcst/metadata/type_inference_provider.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,9 +6,7 @@ import json import subprocess from pathlib import Path -from typing import Dict, List, Mapping, Optional, Sequence, Tuple - -from mypy_extensions import TypedDict +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, TypedDict import libcst as cst from libcst._position import CodePosition, CodeRange @@ -16,6 +14,11 @@ from libcst.metadata.base_provider import BatchableMetadataProvider from libcst.metadata.position_provider import PositionProvider +class TypeInferenceError(Exception): + """An attempt to access inferred type annotation + (through Pyre Query API) failed.""" + + class Position(TypedDict): line: int column: int @@ -32,14 +35,14 @@ class InferredType(TypedDict): annotation: str -class PyreData(TypedDict): +class PyreData(TypedDict, total=False): types: Sequence[InferredType] class TypeInferenceProvider(BatchableMetadataProvider[str]): """ Access inferred type annotation through `Pyre Query API `_. - It requires `setup watchman `_ + It requires `setup watchman `_ and start pyre server by running ``pyre`` command. The inferred type is a string of `type annotation `_. E.g. 
``typing.List[libcst._nodes.expression.Name]`` @@ -52,29 +55,36 @@ class TypeInferenceProvider(BatchableMetadataProvider[str]): METADATA_DEPENDENCIES = (PositionProvider,) - @staticmethod + @classmethod def gen_cache( - root_path: Path, paths: List[str], timeout: Optional[int] + cls, + root_path: Path, + paths: List[str], + timeout: Optional[int] = None, + **kwargs: Any, ) -> Mapping[str, object]: params = ",".join(f"path='{root_path / path}'" for path in paths) - cmd = f'''pyre --noninteractive query "types({params})"''' - try: - stdout, stderr, return_code = run_command(cmd, timeout=timeout) - except subprocess.TimeoutExpired as exc: - raise exc + cmd_args = ["pyre", "--noninteractive", "query", f"types({params})"] + + result = subprocess.run( + cmd_args, capture_output=True, timeout=timeout, text=True + ) - if return_code != 0: - raise Exception(f"stderr:\n {stderr}\nstdout:\n {stdout}") try: - resp = json.loads(stdout)["response"] + result.check_returncode() + resp = json.loads(result.stdout)["response"] except Exception as e: - raise Exception(f"{e}\n\nstderr:\n {stderr}\nstdout:\n {stdout}") + raise TypeInferenceError( + f"{e}\n\nstderr:\n {result.stderr}\nstdout:\n {result.stdout}" + ) from e + return {path: _process_pyre_data(data) for path, data in zip(paths, resp)} def __init__(self, cache: PyreData) -> None: super().__init__(cache) lookup: Dict[CodeRange, str] = {} - for item in cache["types"]: + cache_types = cache.get("types", []) + for item in cache_types: location = item["location"] start = location["start"] end = location["stop"] @@ -101,14 +111,6 @@ class TypeInferenceProvider(BatchableMetadataProvider[str]): self._parse_metadata(node) -def run_command(command: str, timeout: Optional[int] = None) -> Tuple[str, str, int]: - process = subprocess.Popen( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True - ) - stdout, stderr = process.communicate(timeout=timeout) - return stdout.decode(), stderr.decode(), process.returncode - - class 
RawPyreData(TypedDict): path: str types: Sequence[InferredType] diff --git a/libcst/metadata/wrapper.py b/libcst/metadata/wrapper.py index 6c31b17f..a9a712ca 100644 --- a/libcst/metadata/wrapper.py +++ b/libcst/metadata/wrapper.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,8 +8,8 @@ import textwrap from contextlib import ExitStack from types import MappingProxyType from typing import ( - TYPE_CHECKING, Any, + cast, Collection, Iterable, Mapping, @@ -17,15 +17,14 @@ from typing import ( MutableSet, Optional, Type, + TYPE_CHECKING, TypeVar, - cast, ) -from libcst._batched_visitor import BatchableCSTVisitor, VisitorMethod, visit_batched +from libcst._batched_visitor import BatchableCSTVisitor, visit_batched, VisitorMethod from libcst._exceptions import MetadataException from libcst.metadata.base_provider import BatchableMetadataProvider - if TYPE_CHECKING: from libcst._nodes.base import CSTNode # noqa: F401 from libcst._nodes.module import Module # noqa: F401 @@ -74,10 +73,9 @@ def _resolve_impl( Updates the _metadata map on wrapper with metadata from the given providers as well as their dependencies. """ - providers = set(providers) - set(wrapper._metadata.keys()) - remaining = _gather_providers(providers, set()) + completed = set(wrapper._metadata.keys()) + remaining = _gather_providers(set(providers), set()) - completed - completed = set() while len(remaining) > 0: batchable = set() @@ -180,7 +178,6 @@ class MetadataWrapper: else: metadata = self.resolve_many([provider])[provider] - # pyre-ignore Pyre doesn't recognize "CSTNode" in this contxt. 
return cast(Mapping["CSTNode", _T], metadata) def resolve_many( diff --git a/libcst/testing/__init__.py b/libcst/testing/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/testing/__init__.py +++ b/libcst/testing/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/testing/utils.py b/libcst/testing/utils.py index cba70ed1..8a320571 100644 --- a/libcst/testing/utils.py +++ b/libcst/testing/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -22,7 +22,6 @@ from typing import ( ) from unittest import TestCase - DATA_PROVIDER_DATA_ATTR_NAME = "__data_provider_data" DATA_PROVIDER_DESCRIPTION_PREFIX = "_data_provider_" PROVIDER_TEST_LIMIT_ATTR_NAME = "__provider_test_limit" @@ -65,7 +64,6 @@ def populate_data_provider_tests(dct: Dict[str, Any]) -> None: member_name, member, DATA_PROVIDER_DATA_ATTR_NAME ) if provider_data is not None: - for description, data in ( provider_data.items() if isinstance(provider_data, dict) @@ -134,7 +132,7 @@ def validate_provider_tests(dct: Dict[str, Any]) -> None: + "these combinations." ) - test_replacement.__name__ = member_name + setattr(test_replacement, "__name__", member_name) members_to_replace[member_name] = test_replacement for member_name, new_member in members_to_replace.items(): diff --git a/libcst/tests/__init__.py b/libcst/tests/__init__.py index 62642369..7bec24cb 100644 --- a/libcst/tests/__init__.py +++ b/libcst/tests/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/tests/__main__.py b/libcst/tests/__main__.py new file mode 100644 index 00000000..df28d1a6 --- /dev/null +++ b/libcst/tests/__main__.py @@ -0,0 +1,10 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from unittest import main + + +if __name__ == "__main__": + main(module=None, verbosity=2) diff --git a/libcst/tests/pyre/.pyre_configuration b/libcst/tests/pyre/.pyre_configuration new file mode 100644 index 00000000..c3018faf --- /dev/null +++ b/libcst/tests/pyre/.pyre_configuration @@ -0,0 +1,6 @@ +{ + "source_directories": [ + "." + ], + "search_path": [] +} diff --git a/libcst/tests/pyre/simple_class.json b/libcst/tests/pyre/simple_class.json index dc621ff3..85192559 100644 --- a/libcst/tests/pyre/simple_class.json +++ b/libcst/tests/pyre/simple_class.json @@ -1,511 +1,485 @@ { "types": [ { + "annotation": "typing.Type[typing.Sequence]", "location": { "start": { - "line": 7, - "column": 19 + "column": 19, + "line": 7 }, "stop": { - "line": 7, - "column": 27 + "column": 27, + "line": 7 } - }, - "annotation": "typing.Type[typing.Sequence]" + } }, { + "annotation": "typing.Type[simple_class.Item]", "location": { "start": { - "line": 10, - "column": 6 + "column": 6, + "line": 10 }, "stop": { - "line": 10, - "column": 10 + "column": 10, + "line": 10 } - }, - "annotation": "typing.Type[libcst.tests.pyre.simple_class.Item]" + } }, { + "annotation": "typing.Callable(simple_class.Item.__init__)[[Named(self, simple_class.Item), Named(n, int)], None]", "location": { "start": { - "line": 11, - "column": 8 + "column": 8, + "line": 11 }, "stop": { - "line": 11, - "column": 16 + "column": 16, + "line": 11 } - }, - "annotation": 
"typing.Callable(libcst.tests.pyre.simple_class.Item.__init__)[[Named(self, unknown), Named(n, int)], None]" + } }, { + "annotation": "simple_class.Item", "location": { "start": { - "line": 11, - "column": 17 + "column": 17, + "line": 11 }, "stop": { - "line": 11, - "column": 21 + "column": 21, + "line": 11 } - }, - "annotation": "libcst.tests.pyre.simple_class.Item" + } }, { + "annotation": "int", "location": { "start": { - "line": 11, - "column": 23 + "column": 23, + "line": 11 }, "stop": { - "line": 11, - "column": 24 + "column": 24, + "line": 11 } - }, - "annotation": "int" + } }, { + "annotation": "typing.Type[int]", "location": { "start": { - "line": 11, - "column": 26 + "column": 26, + "line": 11 }, "stop": { - "line": 11, - "column": 29 + "column": 29, + "line": 11 } - }, - "annotation": "typing.Type[int]" + } }, { + "annotation": "typing.Type[None]", "location": { "start": { - "line": 11, - "column": 34 + "column": 34, + "line": 11 }, "stop": { - "line": 11, - "column": 38 + "column": 38, + "line": 11 } - }, - "annotation": "None" + } }, { + "annotation": "simple_class.Item", "location": { "start": { - "line": 12, - "column": 8 + "column": 8, + "line": 12 }, "stop": { - "line": 12, - "column": 12 + "column": 12, + "line": 12 } - }, - "annotation": "libcst.tests.pyre.simple_class.Item" + } }, { + "annotation": "int", "location": { "start": { - "line": 12, - "column": 8 + "column": 8, + "line": 12 }, "stop": { - "line": 12, - "column": 19 + "column": 19, + "line": 12 } - }, - "annotation": "int" + } }, { + "annotation": "typing.Type[int]", "location": { "start": { - "line": 12, - "column": 21 + "column": 21, + "line": 12 }, "stop": { - "line": 12, - "column": 24 + "column": 24, + "line": 12 } - }, - "annotation": "typing.Type[int]" + } }, { + "annotation": "int", "location": { "start": { - "line": 12, - "column": 27 + "column": 27, + "line": 12 }, "stop": { - "line": 12, - "column": 28 + "column": 28, + "line": 12 } - }, - "annotation": "int" + } }, { + 
"annotation": "typing.Type[simple_class.ItemCollector]", "location": { "start": { - "line": 15, - "column": 6 + "column": 6, + "line": 15 }, "stop": { - "line": 15, - "column": 19 + "column": 19, + "line": 15 } - }, - "annotation": "typing.Type[libcst.tests.pyre.simple_class.ItemCollector]" + } }, { + "annotation": "typing.Callable(simple_class.ItemCollector.get_items)[[Named(self, simple_class.ItemCollector), Named(n, int)], typing.Sequence[simple_class.Item]]", "location": { "start": { - "line": 16, - "column": 8 + "column": 8, + "line": 16 }, "stop": { - "line": 16, - "column": 17 + "column": 17, + "line": 16 } - }, - "annotation": "typing.Callable(libcst.tests.pyre.simple_class.ItemCollector.get_items)[[Named(self, unknown), Named(n, int)], typing.Sequence[libcst.tests.pyre.simple_class.Item]]" + } }, { + "annotation": "simple_class.ItemCollector", "location": { "start": { - "line": 16, - "column": 18 + "column": 18, + "line": 16 }, "stop": { - "line": 16, - "column": 22 + "column": 22, + "line": 16 } - }, - "annotation": "libcst.tests.pyre.simple_class.ItemCollector" + } }, { + "annotation": "int", "location": { "start": { - "line": 16, - "column": 24 + "column": 24, + "line": 16 }, "stop": { - "line": 16, - "column": 25 + "column": 25, + "line": 16 } - }, - "annotation": "int" + } }, { + "annotation": "typing.Type[int]", "location": { "start": { - "line": 16, - "column": 27 + "column": 27, + "line": 16 }, "stop": { - "line": 16, - "column": 30 + "column": 30, + "line": 16 } - }, - "annotation": "typing.Type[int]" + } }, { + "annotation": "typing.Type[typing.Sequence[simple_class.Item]]", "location": { "start": { - "line": 16, - "column": 35 + "column": 35, + "line": 16 }, "stop": { - "line": 16, - "column": 43 + "column": 49, + "line": 16 } - }, - "annotation": "typing.Callable(typing.GenericMeta.__getitem__)[[typing.Type[Variable[typing._T_co](covariant)]], typing.Type[typing.Sequence[Variable[typing._T_co](covariant)]]]" + } }, { + "annotation": 
"typing.List[simple_class.Item]", "location": { "start": { - "line": 16, - "column": 35 + "column": 15, + "line": 17 }, "stop": { - "line": 16, - "column": 49 + "column": 42, + "line": 17 } - }, - "annotation": "typing.Type[typing.Sequence[libcst.tests.pyre.simple_class.Item]]" + } }, { + "annotation": "typing.Type[simple_class.Item]", "location": { "start": { - "line": 16, - "column": 44 + "column": 16, + "line": 17 }, "stop": { - "line": 16, - "column": 48 + "column": 20, + "line": 17 } - }, - "annotation": "typing.Type[libcst.tests.pyre.simple_class.Item]" + } }, { + "annotation": "simple_class.Item", "location": { "start": { - "line": 17, - "column": 15 + "column": 16, + "line": 17 }, "stop": { - "line": 17, - "column": 42 + "column": 23, + "line": 17 } - }, - "annotation": "typing.List[libcst.tests.pyre.simple_class.Item]" + } }, { + "annotation": "int", "location": { "start": { - "line": 17, - "column": 16 + "column": 28, + "line": 17 }, "stop": { - "line": 17, - "column": 20 + "column": 29, + "line": 17 } - }, - "annotation": "typing.Type[libcst.tests.pyre.simple_class.Item]" + } }, { + "annotation": "typing.Type[range]", "location": { "start": { - "line": 17, - "column": 16 + "column": 33, + "line": 17 }, "stop": { - "line": 17, - "column": 23 + "column": 38, + "line": 17 } - }, - "annotation": "libcst.tests.pyre.simple_class.Item" + } }, { + "annotation": "range", "location": { "start": { - "line": 17, - "column": 28 + "column": 33, + "line": 17 }, "stop": { - "line": 17, - "column": 29 + "column": 41, + "line": 17 } - }, - "annotation": "int" + } }, { + "annotation": "int", "location": { "start": { - "line": 17, - "column": 33 + "column": 39, + "line": 17 }, "stop": { - "line": 17, - "column": 38 + "column": 40, + "line": 17 } - }, - "annotation": "typing.Type[range]" + } }, { + "annotation": "simple_class.ItemCollector", "location": { "start": { - "line": 17, - "column": 33 + "column": 0, + "line": 20 }, "stop": { - "line": 17, - "column": 41 + "column": 
9, + "line": 20 } - }, - "annotation": "range" + } }, { + "annotation": "typing.Type[simple_class.ItemCollector]", "location": { "start": { - "line": 17, - "column": 39 + "column": 12, + "line": 20 }, "stop": { - "line": 17, - "column": 40 + "column": 25, + "line": 20 } - }, - "annotation": "int" + } }, { + "annotation": "simple_class.ItemCollector", "location": { "start": { - "line": 20, - "column": 0 + "column": 12, + "line": 20 }, "stop": { - "line": 20, - "column": 9 + "column": 27, + "line": 20 } - }, - "annotation": "libcst.tests.pyre.simple_class.ItemCollector" + } }, { + "annotation": "typing.Sequence[simple_class.Item]", "location": { "start": { - "line": 20, - "column": 12 + "column": 0, + "line": 21 }, "stop": { - "line": 20, - "column": 25 + "column": 5, + "line": 21 } - }, - "annotation": "typing.Type[libcst.tests.pyre.simple_class.ItemCollector]" + } }, { + "annotation": "typing.Type[typing.Sequence[simple_class.Item]]", "location": { "start": { - "line": 20, - "column": 12 + "column": 7, + "line": 21 }, "stop": { - "line": 20, - "column": 27 + "column": 21, + "line": 21 } - }, - "annotation": "libcst.tests.pyre.simple_class.ItemCollector" + } }, { + "annotation": "simple_class.ItemCollector", "location": { "start": { - "line": 21, - "column": 0 + "column": 24, + "line": 21 }, "stop": { - "line": 21, - "column": 5 + "column": 33, + "line": 21 } - }, - "annotation": "typing.Sequence[libcst.tests.pyre.simple_class.Item]" + } }, { + "annotation": "BoundMethod[typing.Callable(simple_class.ItemCollector.get_items)[[Named(self, simple_class.ItemCollector), Named(n, int)], typing.Sequence[simple_class.Item]], simple_class.ItemCollector]", "location": { "start": { - "line": 21, - "column": 7 + "column": 24, + "line": 21 }, "stop": { - "line": 21, - "column": 21 + "column": 43, + "line": 21 } - }, - "annotation": "typing.Type[typing.Sequence[libcst.tests.pyre.simple_class.Item]]" + } }, { + "annotation": "typing.Sequence[simple_class.Item]", "location": { 
"start": { - "line": 21, - "column": 24 + "column": 24, + "line": 21 }, "stop": { - "line": 21, - "column": 33 + "column": 46, + "line": 21 } - }, - "annotation": "libcst.tests.pyre.simple_class.ItemCollector" + } }, { + "annotation": "typing_extensions.Literal[3]", "location": { "start": { - "line": 21, - "column": 24 + "column": 44, + "line": 21 }, "stop": { - "line": 21, - "column": 43 + "column": 45, + "line": 21 } - }, - "annotation": "typing.Callable(libcst.tests.pyre.simple_class.ItemCollector.get_items)[[Named(n, int)], typing.Sequence[libcst.tests.pyre.simple_class.Item]]" + } }, { + "annotation": "simple_class.Item", "location": { "start": { - "line": 21, - "column": 24 + "column": 4, + "line": 22 }, "stop": { - "line": 21, - "column": 46 + "column": 8, + "line": 22 } - }, - "annotation": "typing.Sequence[libcst.tests.pyre.simple_class.Item]" + } }, { + "annotation": "simple_class.Item", "location": { "start": { - "line": 21, - "column": 44 + "column": 12, + "line": 22 }, "stop": { - "line": 21, - "column": 45 + "column": 17, + "line": 22 } - }, - "annotation": "typing_extensions.Literal[3]" + } }, { + "annotation": "simple_class.Item", "location": { "start": { - "line": 22, - "column": 4 + "column": 4, + "line": 23 }, "stop": { - "line": 22, - "column": 8 + "column": 8, + "line": 23 } - }, - "annotation": "libcst.tests.pyre.simple_class.Item" + } }, { + "annotation": "int", "location": { "start": { - "line": 22, - "column": 12 + "column": 4, + "line": 23 }, "stop": { - "line": 22, - "column": 17 + "column": 15, + "line": 23 } - }, - "annotation": "typing.Sequence[libcst.tests.pyre.simple_class.Item]" - }, - { - "location": { - "start": { - "line": 23, - "column": 4 - }, - "stop": { - "line": 23, - "column": 8 - } - }, - "annotation": "libcst.tests.pyre.simple_class.Item" - }, - { - "location": { - "start": { - "line": 23, - "column": 4 - }, - "stop": { - "line": 23, - "column": 15 - } - }, - "annotation": "int" + } } ] } \ No newline at end of file diff 
--git a/libcst/tests/pyre/simple_class.py b/libcst/tests/pyre/simple_class.py index 2ee2d32f..d9d6784c 100644 --- a/libcst/tests/pyre/simple_class.py +++ b/libcst/tests/pyre/simple_class.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/tests/test_add_slots.py b/libcst/tests/test_add_slots.py new file mode 100644 index 00000000..e354f60b --- /dev/null +++ b/libcst/tests/test_add_slots.py @@ -0,0 +1,46 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import pickle +from dataclasses import dataclass +from typing import ClassVar + +from libcst._add_slots import add_slots + +from libcst.testing.utils import UnitTest + + +# this test class needs to be defined at module level to test pickling. +@add_slots +@dataclass(frozen=True) +class A: + x: int + y: str + + Z: ClassVar[int] = 5 + + +class AddSlotsTest(UnitTest): + def test_pickle(self) -> None: + a = A(1, "foo") + self.assertEqual(a, pickle.loads(pickle.dumps(a))) + object.__delattr__(a, "y") + self.assertEqual(a.x, pickle.loads(pickle.dumps(a)).x) + + def test_prevents_slots_overlap(self) -> None: + class A: + __slots__ = ("x",) + + class B(A): + __slots__ = ("z",) + + @add_slots + @dataclass + class C(B): + x: int + y: str + z: bool + + self.assertSequenceEqual(C.__slots__, ("y",)) diff --git a/libcst/tests/test_batched_visitor.py b/libcst/tests/test_batched_visitor.py index ee3351f4..9009847c 100644 --- a/libcst/tests/test_batched_visitor.py +++ b/libcst/tests/test_batched_visitor.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -16,57 +16,57 @@ class BatchedVisitorTest(UnitTest): mock = Mock() class ABatchable(BatchableCSTVisitor): - def visit_Pass(self, node: cst.Pass) -> None: - mock.visited_a() - object.__setattr__(node, "a_attr", True) + def visit_Del(self, node: cst.Del) -> None: + object.__setattr__(node, "target", mock.visited_a()) class BBatchable(BatchableCSTVisitor): - def visit_Pass(self, node: cst.Pass) -> None: - mock.visited_b() - object.__setattr__(node, "b_attr", 1) + def visit_Del(self, node: cst.Del) -> None: + object.__setattr__(node, "semicolon", mock.visited_b()) - module = visit_batched(parse_module("pass"), [ABatchable(), BBatchable()]) - pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] - - # Check properties were set - self.assertEqual(object.__getattribute__(pass_, "a_attr"), True) - self.assertEqual(object.__getattribute__(pass_, "b_attr"), 1) + module = visit_batched(parse_module("del a"), [ABatchable(), BBatchable()]) + del_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] # Check that each visitor was only called once mock.visited_a.assert_called_once() mock.visited_b.assert_called_once() + # Check properties were set + self.assertEqual(object.__getattribute__(del_, "target"), mock.visited_a()) + self.assertEqual(object.__getattribute__(del_, "semicolon"), mock.visited_b()) + def test_all_visits(self) -> None: mock = Mock() class Batchable(BatchableCSTVisitor): - def visit_Pass(self, node: cst.Pass) -> None: - mock.visit_Pass() - object.__setattr__(node, "visit_Pass", True) + def visit_If(self, node: cst.If) -> None: + object.__setattr__(node, "test", mock.visit_If()) - def visit_Pass_semicolon(self, node: cst.Pass) -> None: - mock.visit_Pass_semicolon() - object.__setattr__(node, "visit_Pass_semicolon", True) + def visit_If_body(self, node: cst.If) -> None: + object.__setattr__(node, "leading_lines", 
mock.visit_If_body()) - def leave_Pass_semicolon(self, node: cst.Pass) -> None: - mock.leave_Pass_semicolon() - object.__setattr__(node, "leave_Pass_semicolon", True) + def leave_If_body(self, node: cst.If) -> None: + object.__setattr__(node, "orelse", mock.leave_If_body()) - def leave_Pass(self, original_node: cst.Pass) -> None: - mock.leave_Pass() - object.__setattr__(original_node, "leave_Pass", True) + def leave_If(self, original_node: cst.If) -> None: + object.__setattr__( + original_node, "whitespace_before_test", mock.leave_If() + ) - module = visit_batched(parse_module("pass"), [Batchable()]) - pass_ = cast(cst.SimpleStatementLine, module.body[0]).body[0] - - # Check properties were set - self.assertEqual(object.__getattribute__(pass_, "visit_Pass"), True) - self.assertEqual(object.__getattribute__(pass_, "leave_Pass"), True) - self.assertEqual(object.__getattribute__(pass_, "visit_Pass_semicolon"), True) - self.assertEqual(object.__getattribute__(pass_, "leave_Pass_semicolon"), True) + module = visit_batched(parse_module("if True: pass"), [Batchable()]) + if_ = cast(cst.SimpleStatementLine, module.body[0]) # Check that each visitor was only called once - mock.visit_Pass.assert_called_once() - mock.leave_Pass.assert_called_once() - mock.visit_Pass_semicolon.assert_called_once() - mock.leave_Pass_semicolon.assert_called_once() + mock.visit_If.assert_called_once() + mock.leave_If.assert_called_once() + mock.visit_If_body.assert_called_once() + mock.leave_If_body.assert_called_once() + + # Check properties were set + self.assertEqual(object.__getattribute__(if_, "test"), mock.visit_If()) + self.assertEqual( + object.__getattribute__(if_, "leading_lines"), mock.visit_If_body() + ) + self.assertEqual(object.__getattribute__(if_, "orelse"), mock.leave_If_body()) + self.assertEqual( + object.__getattribute__(if_, "whitespace_before_test"), mock.leave_If() + ) diff --git a/libcst/tests/test_deep_clone.py b/libcst/tests/test_deep_clone.py index 6df62d40..b6cf2be5 
100644 --- a/libcst/tests/test_deep_clone.py +++ b/libcst/tests/test_deep_clone.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -6,7 +6,7 @@ from textwrap import dedent from typing import Set import libcst as cst -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class DeepCloneTest(UnitTest): diff --git a/libcst/tests/test_deep_replace.py b/libcst/tests/test_deep_replace.py index 77c29f09..d8e5b475 100644 --- a/libcst/tests/test_deep_replace.py +++ b/libcst/tests/test_deep_replace.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/libcst/tests/test_e2e.py b/libcst/tests/test_e2e.py new file mode 100644 index 00000000..e6dfdb5c --- /dev/null +++ b/libcst/tests/test_e2e.py @@ -0,0 +1,92 @@ +import contextlib +import os +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Dict, Generator +from unittest import TestCase + +from libcst import BaseExpression, Call, matchers as m, Name +from libcst.codemod import ( + CodemodContext, + gather_files, + parallel_exec_transform_with_prettyprint, + VisitorBasedCodemodCommand, +) +from libcst.codemod.visitors import AddImportsVisitor + + +class PrintToPPrintCommand(VisitorBasedCodemodCommand): + def __init__(self, context: CodemodContext, **kwargs: Dict[str, object]) -> None: + super().__init__(context, **kwargs) + self.context.scratch["PPRINT_WAS_HERE"] = True + + def leave_Call(self, original_node: Call, updated_node: Call) -> BaseExpression: + if not self.context.scratch["PPRINT_WAS_HERE"]: + raise AssertionError("Scratch space lost") + + if m.matches(updated_node, m.Call(func=m.Name("print"))): + AddImportsVisitor.add_needed_import( + self.context, + "pprint", + "pprint", + ) + return updated_node.with_changes(func=Name("pprint")) + return super().leave_Call(original_node, updated_node) + + +@contextlib.contextmanager +def temp_workspace() -> Generator[Path, None, None]: + cwd = os.getcwd() + with TemporaryDirectory() as temp_dir: + try: + ws = Path(temp_dir).resolve() + os.chdir(ws) + yield ws + finally: + os.chdir(cwd) + + +class ToolE2ETest(TestCase): + def test_leaky_codemod(self) -> None: + for msg, command in [ + ("instantiated", PrintToPPrintCommand(CodemodContext())), + ("class", PrintToPPrintCommand), + ]: + with self.subTest(msg), temp_workspace() as tmp: + # File to trigger codemod + example: Path = tmp / "example.py" + example.write_text("""print("Hello")""") + # File that should not be modified + other = tmp / "other.py" + other.touch() + # Just a dir named "dir.py", should be ignored + adir 
= tmp / "dir.py" + adir.mkdir() + + # Run command + files = gather_files(".") + result = parallel_exec_transform_with_prettyprint( + command, + files, + format_code=False, + hide_progress=True, + ) + + print(result) + + # Check results + self.assertEqual(2, result.successes) + self.assertEqual(0, result.skips) + self.assertEqual(0, result.failures) + # Expect example.py to be modified + self.assertIn( + "from pprint import pprint", + example.read_text(), + "import missing in example.py", + ) + # Expect other.py to NOT be modified + self.assertNotIn( + "from pprint import pprint", + other.read_text(), + "import found in other.py", + ) diff --git a/libcst/tests/test_exceptions.py b/libcst/tests/test_exceptions.py index a1c7d842..f54f1da6 100644 --- a/libcst/tests/test_exceptions.py +++ b/libcst/tests/test_exceptions.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -8,7 +8,7 @@ import pickle from textwrap import dedent import libcst as cst -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class ExceptionsTest(UnitTest): diff --git a/libcst/tests/test_fuzz.py b/libcst/tests/test_fuzz.py index 590449c7..6ec95136 100644 --- a/libcst/tests/test_fuzz.py +++ b/libcst/tests/test_fuzz.py @@ -20,7 +20,6 @@ from hypothesmith import from_grammar import libcst - # If in doubt, you should use these "unit test" settings. They tune the timeouts # and example-reproduction behaviour for these tests' unusually large inputs. 
hypothesis.settings.register_profile( @@ -51,6 +50,9 @@ class FuzzTest(unittest.TestCase): @unittest.skipUnless( bool(os.environ.get("HYPOTHESIS", False)), "Hypothesis not requested" ) + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `hypothesis.given($parameter$source_code = + # hypothesmith.from_grammar($parameter$start = "file_input"))`. @hypothesis.given(source_code=from_grammar(start="file_input")) def test_parsing_compilable_module_strings(self, source_code: str) -> None: """The `from_grammar()` strategy generates strings from Python's grammar. @@ -78,6 +80,9 @@ class FuzzTest(unittest.TestCase): @unittest.skipUnless( bool(os.environ.get("HYPOTHESIS", False)), "Hypothesis not requested" ) + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `hypothesis.given($parameter$source_code = + # hypothesmith.from_grammar($parameter$start = "eval_input").map(str.strip))`. @hypothesis.given(source_code=from_grammar(start="eval_input").map(str.strip)) def test_parsing_compilable_expression_strings(self, source_code: str) -> None: """Much like statements, but for expressions this time. @@ -106,6 +111,10 @@ class FuzzTest(unittest.TestCase): @unittest.skipUnless( bool(os.environ.get("HYPOTHESIS", False)), "Hypothesis not requested" ) + # pyre-fixme[56]: Pyre was not able to infer the type of the decorator + # `hypothesis.given($parameter$source_code = + # hypothesmith.from_grammar($parameter$start = "single_input").map(lambda + # ($parameter$s) (s.replace(" @hypothesis.given( source_code=from_grammar(start="single_input").map( lambda s: s.replace("\n", "") + "\n" diff --git a/libcst/tests/test_import.py b/libcst/tests/test_import.py new file mode 100644 index 00000000..cad8883d --- /dev/null +++ b/libcst/tests/test_import.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from unittest import TestCase + + +class TestImport(TestCase): + def test_import_libcst(self) -> None: + import libcst # noqa: F401 diff --git a/libcst/tests/test_pyre_integration.py b/libcst/tests/test_pyre_integration.py index 11fd7f8d..679b2d5e 100644 --- a/libcst/tests/test_pyre_integration.py +++ b/libcst/tests/test_pyre_integration.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. @@ -10,13 +10,8 @@ from typing import Dict, List, Mapping, Optional, Tuple, Union import libcst as cst from libcst.metadata import MetadataWrapper, PositionProvider -from libcst.metadata.type_inference_provider import ( - PyreData, - _process_pyre_data, - run_command, -) -from libcst.testing.utils import UnitTest, data_provider - +from libcst.metadata.type_inference_provider import PyreData +from libcst.testing.utils import data_provider, UnitTest TEST_SUITE_PATH: Path = Path(__file__).parent / "pyre" @@ -63,12 +58,7 @@ class TypeVerificationVisitor(cst.CSTVisitor): end = pos.end tup = (start.line, start.column, end.line, end.column) # remove this if condition when the type issues are fixed. - if not any( - node.deep_equals(name) and tup == _tup - for (name, _tup) in { - (cst.Name("i"), (17, 21, 17, 22)), - } - ): + if node.value not in {"n", "i"}: self.test.assertIn( tup, self.lookup, @@ -95,6 +85,10 @@ class TypeVerificationVisitor(cst.CSTVisitor): class PyreIntegrationTest(UnitTest): + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `comprehension((source_path, data_path) for generators(generator((source_path, + # data_path) in zip(TEST_SUITE_PATH.glob("*.py"), TEST_SUITE_PATH.glob("*.json")) + # if )))` to decorator factory `libcst.testing.utils.data_provider`. 
@data_provider( ( (source_path, data_path) @@ -118,26 +112,7 @@ class PyreIntegrationTest(UnitTest): if __name__ == "__main__": - """Run this script directly to generate pyre data for test suite (tests/pyre/*.py)""" - print("start pyre server") - stdout: str - stderr: str - return_code: int - stdout, stderr, return_code = run_command("pyre start") - if return_code != 0: - print(stdout) - print(stderr) + import sys - for path in TEST_SUITE_PATH.glob("*.py"): - cmd = f'''pyre query "types(path='{path}')"''' - print(cmd) - stdout, stderr, return_code = run_command(cmd) - if return_code != 0: - print(stdout) - print(stderr) - data = json.loads(stdout) - data = data["response"][0] - data = _process_pyre_data(data) - output_path = path.with_suffix(".json") - print(f"write output to {output_path}") - output_path.write_text(json.dumps(data, indent=2)) + print("run `scripts/regenerate-fixtures.py` instead") + sys.exit(1) diff --git a/libcst/tests/test_roundtrip.py b/libcst/tests/test_roundtrip.py new file mode 100644 index 00000000..96d1e507 --- /dev/null +++ b/libcst/tests/test_roundtrip.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from pathlib import Path +from unittest import TestCase + +from libcst import CSTTransformer, parse_module + + +fixtures: Path = Path(__file__).parent.parent.parent / "native/libcst/tests/fixtures" + + +class NOOPTransformer(CSTTransformer): + pass + + +class RoundTripTests(TestCase): + def _get_fixtures(self) -> list[Path]: + self.assertTrue(fixtures.exists(), f"{fixtures} should exist") + files = list(fixtures.iterdir()) + self.assertGreater(len(files), 0) + return files + + def test_clean_roundtrip(self) -> None: + for file in self._get_fixtures(): + with self.subTest(file=str(file)): + src = file.read_text(encoding="utf-8") + mod = parse_module(src) + self.maxDiff = None + self.assertEqual(mod.code, src) + + def test_transform_roundtrip(self) -> None: + transformer = NOOPTransformer() + self.maxDiff = None + for file in self._get_fixtures(): + with self.subTest(file=str(file)): + src = file.read_text(encoding="utf-8") + mod = parse_module(src) + new_mod = mod.visit(transformer) + self.assertEqual(src, new_mod.code) diff --git a/libcst/tests/test_tabs.py b/libcst/tests/test_tabs.py index f72824c5..23ff9047 100644 --- a/libcst/tests/test_tabs.py +++ b/libcst/tests/test_tabs.py @@ -1,10 +1,10 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from libcst._tabs import expand_tabs -from libcst.testing.utils import UnitTest, data_provider +from libcst.testing.utils import data_provider, UnitTest class ExpandTabsTest(UnitTest): diff --git a/libcst/tests/test_type_enforce.py b/libcst/tests/test_type_enforce.py index edc283e5..f6fecc7d 100644 --- a/libcst/tests/test_type_enforce.py +++ b/libcst/tests/test_type_enforce.py @@ -1,17 +1,17 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from types import MappingProxyType from typing import ( - TYPE_CHECKING, Any, AsyncGenerator, ClassVar, Dict, Iterable, List, + Literal, Mapping, MutableMapping, NamedTuple, @@ -20,14 +20,12 @@ from typing import ( Set, Tuple, Type, + TYPE_CHECKING, Union, ) -from typing_extensions import Literal - from libcst._type_enforce import is_value_of_type -from libcst.testing.utils import UnitTest, data_provider - +from libcst.testing.utils import data_provider, UnitTest if TYPE_CHECKING: from collections import Counter # noqa: F401 diff --git a/libcst/tests/test_visitor.py b/libcst/tests/test_visitor.py index 5bc0510e..bf540553 100644 --- a/libcst/tests/test_visitor.py +++ b/libcst/tests/test_visitor.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. diff --git a/libcst/tool.py b/libcst/tool.py index de626ac3..a2164b11 100644 --- a/libcst/tool.py +++ b/libcst/tool.py @@ -1,4 +1,4 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
@@ -9,28 +9,23 @@ # python -m libcst.tool print python_file.py import argparse -import dataclasses -import distutils.spawn import importlib import inspect import os import os.path +import shutil import sys import textwrap from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, List, Sequence, Tuple, Type +from typing import Any, Callable, Dict, List, Tuple, Type -import yaml +try: + import yaml_ft as yaml # pyre-ignore +except ModuleNotFoundError: + import yaml -from libcst import ( - LIBCST_VERSION, - CSTNode, - IndentedBlock, - Module, - PartialParserConfig, - parse_module, -) -from libcst._nodes.deep_equals import deep_equals +from libcst import CSTLogicError, LIBCST_VERSION, parse_module, PartialParserConfig +from libcst._parser.parso.utils import parse_version_string from libcst.codemod import ( CodemodCommand, CodemodContext, @@ -39,186 +34,8 @@ from libcst.codemod import ( gather_files, parallel_exec_transform_with_prettyprint, ) - - -_DEFAULT_INDENT: str = " " - - -def _node_repr_recursive( # noqa: C901 - node: object, - *, - indent: str = _DEFAULT_INDENT, - show_defaults: bool = False, - show_syntax: bool = False, - show_whitespace: bool = False, -) -> List[str]: - if isinstance(node, CSTNode): - # This is a CSTNode, we must pretty-print it. 
- tokens: List[str] = [node.__class__.__name__] - fields: Sequence["dataclasses.Field[object]"] = dataclasses.fields(node) - - # Hide all fields prefixed with "_" - fields = [f for f in fields if f.name[0] != "_"] - - # Filter whitespace nodes if needed - if not show_whitespace: - - def _is_whitespace(field: "dataclasses.Field[object]") -> bool: - if "whitespace" in field.name: - return True - if "leading_lines" in field.name: - return True - if "lines_after_decorators" in field.name: - return True - if isinstance(node, (IndentedBlock, Module)) and field.name in [ - "header", - "footer", - ]: - return True - if isinstance(node, IndentedBlock) and field.name == "indent": - return True - return False - - fields = [f for f in fields if not _is_whitespace(f)] - # Filter values which aren't changed from their defaults - if not show_defaults: - - def _get_default(fld: "dataclasses.Field[object]") -> object: - if fld.default_factory is not dataclasses.MISSING: - return fld.default_factory() - return fld.default - - fields = [ - f - for f in fields - if not deep_equals(getattr(node, f.name), _get_default(f)) - ] - # Filter out values which aren't interesting if needed - if not show_syntax: - - def _is_syntax(field: "dataclasses.Field[object]") -> bool: - if isinstance(node, Module) and field.name in [ - "encoding", - "default_indent", - "default_newline", - "has_trailing_newline", - ]: - return True - type_str = repr(field.type) - if ( - "Sentinel" in type_str - and field.name not in ["star_arg", "star", "posonly_ind"] - and "whitespace" not in field.name - ): - # This is a value that can optionally be specified, so its - # definitely syntax. 
- return True - - for name in ["Semicolon", "Colon", "Comma", "Dot", "AssignEqual"]: - # These are all nodes that exist for separation syntax - if name in type_str: - return True - - return False - - fields = [f for f in fields if not _is_syntax(f)] - - if len(fields) == 0: - tokens.append("()") - else: - tokens.append("(\n") - - for field in fields: - child_tokens: List[str] = [field.name, "="] - value = getattr(node, field.name) - - if isinstance(value, (str, bytes)) or not isinstance(value, Sequence): - # Render out the node contents - child_tokens.extend( - _node_repr_recursive( - value, - show_whitespace=show_whitespace, - show_defaults=show_defaults, - show_syntax=show_syntax, - ) - ) - elif isinstance(value, Sequence): - # Render out a list of individual nodes - if len(value) > 0: - child_tokens.append("[\n") - list_tokens: List[str] = [] - - last_value = len(value) - 1 - for j, v in enumerate(value): - list_tokens.extend( - _node_repr_recursive( - v, - show_whitespace=show_whitespace, - show_defaults=show_defaults, - show_syntax=show_syntax, - ) - ) - if j != last_value: - list_tokens.append(",\n") - else: - list_tokens.append(",") - - split_by_line = "".join(list_tokens).split("\n") - child_tokens.append( - "\n".join(f"{indent}{t}" for t in split_by_line) - ) - - child_tokens.append("\n]") - else: - child_tokens.append("[]") - else: - raise Exception("Logic error!") - - # Handle indentation and trailing comma. 
- split_by_line = "".join(child_tokens).split("\n") - tokens.append("\n".join(f"{indent}{t}" for t in split_by_line)) - tokens.append(",\n") - - tokens.append(")") - - return tokens - else: - # This is a python value, just return the repr - return [repr(node)] - - -def dump( - node: CSTNode, - *, - indent: str = _DEFAULT_INDENT, - show_defaults: bool = False, - show_syntax: bool = False, - show_whitespace: bool = False, -) -> str: - """ - Returns a string representation of the node that contains minimal differences - from the default contruction of the node while also hiding whitespace and - syntax fields. - - Setting ``show_default`` to ``True`` will add fields regardless if their - value is different from the default value. - - Setting ``show_whitespace`` will add whitespace fields and setting - ``show_syntax`` will add syntax fields while respecting the value of - ``show_default``. - - When all keyword args are set to true, the output of this function is - indentical to the __repr__ method of the node. 
- """ - return "".join( - _node_repr_recursive( - node, - indent=indent, - show_defaults=show_defaults, - show_syntax=show_syntax, - show_whitespace=show_whitespace, - ) - ) +from libcst.display import dump, dump_graphviz +from libcst.display.text import _DEFAULT_INDENT def _print_tree_impl(proc_name: str, command_args: List[str]) -> int: @@ -248,6 +65,16 @@ def _print_tree_impl(proc_name: str, command_args: List[str]) -> int: action="store_true", help="Show values that exist only for syntax, like commas or semicolons", ) + parser.add_argument( + "--graphviz", + action="store_true", + help="Displays the graph in .dot format, compatible with Graphviz", + ) + parser.add_argument( + "--indent-string", + default=_DEFAULT_INDENT, + help=f"String to use for indenting levels, defaults to {_DEFAULT_INDENT!r}", + ) parser.add_argument( "-p", "--python-version", @@ -277,14 +104,25 @@ def _print_tree_impl(proc_name: str, command_args: List[str]) -> int: else PartialParserConfig() ), ) - print( - dump( - tree, - show_defaults=args.show_defaults, - show_syntax=args.show_syntax, - show_whitespace=args.show_whitespace, + if not args.graphviz: + print( + dump( + tree, + indent=args.indent_string, + show_defaults=args.show_defaults, + show_syntax=args.show_syntax, + show_whitespace=args.show_whitespace, + ) + ) + else: + print( + dump_graphviz( + tree, + show_defaults=args.show_defaults, + show_syntax=args.show_syntax, + show_whitespace=args.show_whitespace, + ) ) - ) return 0 @@ -356,7 +194,7 @@ def _find_and_load_config(proc_name: str) -> Dict[str, Any]: requires_config = bool(os.environ.get("LIBCST_TOOL_REQUIRE_CONFIG", "")) if requires_config and not found_config: - raise Exception( + raise FileNotFoundError( f"Did not find a {CONFIG_FILE_NAME} in current directory or any " + "parent directory! 
Perhaps you meant to run this command from a " + "configured subdirectory, or you need to initialize a new project " @@ -365,10 +203,7 @@ def _find_and_load_config(proc_name: str) -> Dict[str, Any]: # Make sure that the formatter is findable. if config["formatter"]: - exe = ( - distutils.spawn.find_executable(config["formatter"][0]) - or config["formatter"][0] - ) + exe = shutil.which(config["formatter"][0]) or config["formatter"][0] config["formatter"] = [os.path.abspath(exe), *config["formatter"][1:]] return config @@ -383,38 +218,49 @@ def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 # full parser below once we know the command and have added its arguments. parser = argparse.ArgumentParser(add_help=False, fromfile_prefix_chars="@") parser.add_argument("command", metavar="COMMAND", type=str, nargs="?", default=None) + ext_action = parser.add_argument( + "-x", + "--external", + action="store_true", + default=False, + help="Interpret `command` as just a module/class specifier", + ) args, _ = parser.parse_known_args(command_args) # Now, try to load the class and get its arguments for help purposes. if args.command is not None: - command_path = args.command.split(".") - if len(command_path) < 2: + command_module_name, _, command_class_name = args.command.rpartition(".") + if not (command_module_name and command_class_name): print(f"{args.command} is not a valid codemod command", file=sys.stderr) return 1 - command_module_name, command_class_name = ( - ".".join(command_path[:-1]), - command_path[-1], - ) - command_class = None - for module in config["modules"]: - try: - command_class = getattr( - importlib.import_module(f"{module}.{command_module_name}"), - command_class_name, - ) - break - # Only swallow known import errors, show the rest of the exceptions - # to the user who is trying to run the codemod. 
- except AttributeError: - continue - except ModuleNotFoundError: - continue - if command_class is None: - print( - f"Could not find {command_module_name} in any configured modules", - file=sys.stderr, + if args.external: + # There's no error handling here on purpose; if the user opted in for `-x`, + # they'll probably want to see the exact import error too. + command_class = getattr( + importlib.import_module(command_module_name), + command_class_name, ) - return 1 + else: + command_class = None + for module in config["modules"]: + try: + command_class = getattr( + importlib.import_module(f"{module}.{command_module_name}"), + command_class_name, + ) + break + # Only swallow known import errors, show the rest of the exceptions + # to the user who is trying to run the codemod. + except AttributeError: + continue + except ModuleNotFoundError: + continue + if command_class is None: + print( + f"Could not find {command_module_name} in any configured modules", + file=sys.stderr, + ) + return 1 else: # Dummy, specifically to allow for running --help with no arguments. 
command_class = CodemodCommand @@ -429,6 +275,7 @@ def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 prog=f"{proc_name} codemod", fromfile_prefix_chars="@", ) + parser._add_action(ext_action) parser.add_argument( "command", metavar="COMMAND", @@ -514,33 +361,45 @@ def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 k: v for k, v in vars(args).items() if k - not in [ + not in { "command", - "path", - "unified_diff", - "jobs", - "python_version", + "external", + "hide_blacklisted_warnings", + "hide_generated_warnings", + "hide_progress", "include_generated", "include_stubs", + "jobs", "no_format", + "path", + "python_version", "show_successes", - "hide_generated_warnings", - "hide_blacklisted_warnings", - "hide_progress", - ] + "unified_diff", + } } - command_instance = command_class(CodemodContext(), **codemod_args) + # Sepcify target version for black formatter + if any(config["formatter"]) and os.path.basename(config["formatter"][0]) in ( + "black", + "black.exe", + ): + parsed_version = parse_version_string(args.python_version) + + config["formatter"] = [ + config["formatter"][0], + "--target-version", + f"py{parsed_version.major}{parsed_version.minor}", + ] + config["formatter"][1:] # Special case for allowing stdin/stdout. Note that this does not allow for # full-repo metadata since there is no path. 
if any(p == "-" for p in args.path): if len(args.path) > 1: - raise Exception("Cannot specify multiple paths when reading from stdin!") + raise ValueError("Cannot specify multiple paths when reading from stdin!") print("Codemodding from stdin", file=sys.stderr) oldcode = sys.stdin.read() newcode = exec_transform_with_prettyprint( - command_instance, + command_class(CodemodContext(), **codemod_args), # type: ignore oldcode, include_generated=args.include_generated, generated_code_marker=config["generated_code_marker"], @@ -563,7 +422,7 @@ def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 files = gather_files(args.path, include_stubs=args.include_stubs) try: result = parallel_exec_transform_with_prettyprint( - command_instance, + command_class, files, jobs=args.jobs, unified_diff=args.unified_diff, @@ -578,6 +437,7 @@ def _codemod_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 blacklist_patterns=config["blacklist_patterns"], python_version=args.python_version, repo_root=config["repo_root"], + codemod_args=codemod_args, ) except KeyboardInterrupt: print("Interrupted!", file=sys.stderr) @@ -606,8 +466,7 @@ class _SerializerBase(ABC): return f"{comments}{os.linesep}{self._serialize_impl(key, value)}{os.linesep}" @abstractmethod - def _serialize_impl(self, key: str, value: object) -> str: - ... + def _serialize_impl(self, key: str, value: object) -> str: ... class _StrSerializer(_SerializerBase): @@ -622,7 +481,7 @@ class _ListSerializer(_SerializerBase): def _serialize_impl(self, key: str, value: object) -> str: if not isinstance(value, list): - raise Exception("Can only serialize lists!") + raise ValueError("Can only serialize lists!") if self.newlines: values = [f"- {v!r}" for v in value] return f"{key}:{os.linesep}{os.linesep.join(values)}" @@ -683,7 +542,7 @@ def _initialize_impl(proc_name: str, command_args: List[str]) -> int: # For safety, verify that it parses to the identical file. 
actual_config = yaml.safe_load(config_str) if actual_config != default_config: - raise Exception("Logic error, serialization is invalid!") + raise CSTLogicError("Logic error, serialization is invalid!") config_file = os.path.abspath(os.path.join(args.path, CONFIG_FILE_NAME)) with open(config_file, "w") as fp: @@ -752,6 +611,8 @@ def _list_impl(proc_name: str, command_args: List[str]) -> int: # noqa: C901 continue # Grab the path, try to import all of the files inside of it. + # pyre-fixme[6]: For 1st argument expected `PathLike[Variable[AnyStr <: + # [str, bytes]]]` but got `Optional[str]`. path = os.path.dirname(os.path.abspath(imported_module.__file__)) for name, imported_module in _recursive_find(path, module): for objname in dir(imported_module): @@ -801,7 +662,7 @@ def main(proc_name: str, cli_args: List[str]) -> int: "--version", help="Print current version of LibCST toolset.", action="version", - version=f"LibCST version {LIBCST_VERSION}", + version=f"LibCST version {LIBCST_VERSION}", # pyre-ignore[16] pyre bug? ) parser.add_argument( "action", diff --git a/native/Cargo.lock b/native/Cargo.lock new file mode 100644 index 00000000..16ffd999 --- /dev/null +++ b/native/Cargo.lock @@ -0,0 +1,877 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "annotate-snippets" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "710e8eae58854cdc1790fcb56cca04d712a17be849eeb81da2a724bf4bae2bc4" +dependencies = [ + "anstyle", + "unicode-width", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bumpalo" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = 
"0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" + +[[package]] +name = "ciborium-ll" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "criterion" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + 
"cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset 0.6.5", + "once_cell", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "difference" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "indexmap" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "indoc" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "libc" +version = "0.2.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" + +[[package]] +name = "libcst" +version = "1.8.6" +dependencies = [ + "annotate-snippets", + "criterion", + "difference", + "itertools 0.14.0", + 
"libcst_derive", + "memchr", + "paste", + "peg", + "pyo3", + "rayon", + "regex", + "thiserror", +] + +[[package]] +name = "libcst_derive" +version = "1.8.6" +dependencies = [ + "quote", + "syn", + "trybuild", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "peg" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9928cfca101b36ec5163e70049ee5368a8a1c3c6efc9ca9c5f9cc2f816152477" +dependencies = [ + "peg-macros", + "peg-runtime", +] + +[[package]] +name = "peg-macros" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6298ab04c202fa5b5d52ba03269fb7b74550b150323038878fe6c372d8280f71" +dependencies = [ + "peg-runtime", + "proc-macro2", + "quote", +] + +[[package]] +name = "peg-runtime" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "132dca9b868d927b35b5dd728167b2dee150eb1ad686008fc71ccb298b776fca" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "portable-atomic" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pyo3" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" +dependencies = [ + "indoc", + "libc", + "memoffset 0.9.0", + "once_cell", + "portable-atomic", + "pyo3-build-config", + "pyo3-ffi", + "pyo3-macros", + "unindent", +] + +[[package]] +name = "pyo3-build-config" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" +dependencies = [ + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" +dependencies = [ + "libc", + "pyo3-build-config", +] + +[[package]] +name = "pyo3-macros" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn", +] + +[[package]] +name = "pyo3-macros-backend" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" +dependencies = [ + "heck", + "proc-macro2", + "pyo3-build-config", + "quote", + "syn", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "serde" +version = "1.0.208" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" 
+version = "1.0.208" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.125" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +dependencies = [ + "serde", +] + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "target-lexicon" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" + +[[package]] +name = "target-triple" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "trybuild" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c9bf9513a2f4aeef5fdac8677d7d349c79fdbcc03b9c86da6e9d254f1e43be2" +dependencies = [ + "glob", + "serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + +[[package]] +name = "unindent" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" + +[[package]] +name = "walkdir" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +dependencies = [ + "same-file", + "winapi", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "winnow" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +dependencies = [ + "memchr", +] diff --git a/native/Cargo.toml b/native/Cargo.toml new file mode 100644 index 00000000..3a0b79f7 --- /dev/null +++ b/native/Cargo.toml @@ -0,0 +1,6 @@ +[workspace] + +members = [ + "libcst", + "libcst_derive", +] diff --git a/native/libcst/Cargo.toml b/native/libcst/Cargo.toml new file mode 100644 index 00000000..e4c9f45f --- /dev/null +++ b/native/libcst/Cargo.toml @@ -0,0 +1,55 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +[package] +name = "libcst" +version = "1.8.6" +authors = ["LibCST Developers"] +edition = "2018" +rust-version = "1.70" +description = "A Python parser and Concrete Syntax Tree library." +license = "MIT AND (MIT AND PSF-2.0)" +repository = "https://github.com/Instagram/LibCST" +documentation = "https://libcst.rtfd.org" +keywords = ["python", "cst", "ast"] +categories = ["parser-implementations"] + +[lib] +name = "libcst_native" +crate-type = ["cdylib", "rlib"] + +[[bin]] +name = "parse" +path = "src/bin.rs" + +[features] +# This is a bit of a hack, since `cargo test` doesn't work with `extension-module`. +# To run tests, use `cargo test --no-default-features`. +# +# Once https://github.com/PyO3/pyo3/pull/1123 lands, it may be better to use +# `-Zextra-link-arg` for this instead. +default = ["py"] +py = ["pyo3", "pyo3/extension-module"] +trace = ["peg/trace"] + +[dependencies] +paste = "1.0.15" +pyo3 = { version = "0.26", optional = true } +thiserror = "2.0.12" +peg = "0.8.5" +annotate-snippets = "0.11.5" +regex = "1.11.2" +memchr = "2.7.4" +libcst_derive = { path = "../libcst_derive", version = "1.8.6" } + +[dev-dependencies] +criterion = { version = "0.6.0", features = ["html_reports"] } +difference = "2.0.0" +rayon = "1.11.0" +itertools = "0.14.0" + +[[bench]] +name = "parser_benchmark" +harness = false diff --git a/native/libcst/Grammar b/native/libcst/Grammar new file mode 100644 index 00000000..274db713 --- /dev/null +++ b/native/libcst/Grammar @@ -0,0 +1,707 @@ +# PEG grammar for Python 3.9 + +@trailer ''' +void * +_PyPegen_parse(Parser *p) +{ + // Initialize keywords + p->keywords = reserved_keywords; + p->n_keyword_lists = n_keyword_lists; + + // Run parser + void *result = NULL; + if (p->start_rule == Py_file_input) { + result = file_rule(p); + } else if (p->start_rule == Py_single_input) { + result = interactive_rule(p); + } else if (p->start_rule == Py_eval_input) { + result = eval_rule(p); + } else if (p->start_rule == Py_func_type_input) { + result 
= func_type_rule(p); + } else if (p->start_rule == Py_fstring_input) { + result = fstring_rule(p); + } + + return result; +} + +// The end +''' +file[mod_ty]: a=[statements] ENDMARKER { _PyPegen_make_module(p, a) } +interactive[mod_ty]: a=statement_newline { Interactive(a, p->arena) } +eval[mod_ty]: a=expressions NEWLINE* ENDMARKER { Expression(a, p->arena) } +func_type[mod_ty]: '(' a=[type_expressions] ')' '->' b=expression NEWLINE* ENDMARKER { FunctionType(a, b, p->arena) } +fstring[expr_ty]: star_expressions + +# type_expressions allow */** but ignore them +type_expressions[asdl_seq*]: + | a=','.expression+ ',' '*' b=expression ',' '**' c=expression { + _PyPegen_seq_append_to_end(p, CHECK(_PyPegen_seq_append_to_end(p, a, b)), c) } + | a=','.expression+ ',' '*' b=expression { _PyPegen_seq_append_to_end(p, a, b) } + | a=','.expression+ ',' '**' b=expression { _PyPegen_seq_append_to_end(p, a, b) } + | '*' a=expression ',' '**' b=expression { + _PyPegen_seq_append_to_end(p, CHECK(_PyPegen_singleton_seq(p, a)), b) } + | '*' a=expression { _PyPegen_singleton_seq(p, a) } + | '**' a=expression { _PyPegen_singleton_seq(p, a) } + | ','.expression+ + +statements[asdl_seq*]: a=statement+ { _PyPegen_seq_flatten(p, a) } +statement[asdl_seq*]: a=compound_stmt { _PyPegen_singleton_seq(p, a) } | simple_stmt +statement_newline[asdl_seq*]: + | a=compound_stmt NEWLINE { _PyPegen_singleton_seq(p, a) } + | simple_stmt + | NEWLINE { _PyPegen_singleton_seq(p, CHECK(_Py_Pass(EXTRA))) } + | ENDMARKER { _PyPegen_interactive_exit(p) } +simple_stmt[asdl_seq*]: + | a=small_stmt !';' NEWLINE { _PyPegen_singleton_seq(p, a) } # Not needed, there for speedup + | a=';'.small_stmt+ [';'] NEWLINE { a } +# NOTE: assignment MUST precede expression, else parsing a simple assignment +# will throw a SyntaxError. 
+small_stmt[stmt_ty] (memo): + | assignment + | e=star_expressions { _Py_Expr(e, EXTRA) } + | &'return' return_stmt + | &('import' | 'from') import_stmt + | &'raise' raise_stmt + | 'pass' { _Py_Pass(EXTRA) } + | &'del' del_stmt + | &'yield' yield_stmt + | &'assert' assert_stmt + | 'break' { _Py_Break(EXTRA) } + | 'continue' { _Py_Continue(EXTRA) } + | &'global' global_stmt + | &'nonlocal' nonlocal_stmt +compound_stmt[stmt_ty]: + | &('def' | '@' | ASYNC) function_def + | &'if' if_stmt + | &('class' | '@') class_def + | &('with' | ASYNC) with_stmt + | &('for' | ASYNC) for_stmt + | &'try' try_stmt + | &'while' while_stmt + +# NOTE: annotated_rhs may start with 'yield'; yield_expr must start with 'yield' +assignment[stmt_ty]: + | a=NAME ':' b=expression c=['=' d=annotated_rhs { d }] { + CHECK_VERSION( + 6, + "Variable annotation syntax is", + _Py_AnnAssign(CHECK(_PyPegen_set_expr_context(p, a, Store)), b, c, 1, EXTRA) + ) } + | a=('(' b=single_target ')' { b } + | single_subscript_attribute_target) ':' b=expression c=['=' d=annotated_rhs { d }] { + CHECK_VERSION(6, "Variable annotations syntax is", _Py_AnnAssign(a, b, c, 0, EXTRA)) } + | a=(z=star_targets '=' { z })+ b=(yield_expr | star_expressions) !'=' tc=[TYPE_COMMENT] { + _Py_Assign(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA) } + | a=single_target b=augassign ~ c=(yield_expr | star_expressions) { + _Py_AugAssign(a, b->kind, c, EXTRA) } + | invalid_assignment + +augassign[AugOperator*]: + | '+=' { _PyPegen_augoperator(p, Add) } + | '-=' { _PyPegen_augoperator(p, Sub) } + | '*=' { _PyPegen_augoperator(p, Mult) } + | '@=' { CHECK_VERSION(5, "The '@' operator is", _PyPegen_augoperator(p, MatMult)) } + | '/=' { _PyPegen_augoperator(p, Div) } + | '%=' { _PyPegen_augoperator(p, Mod) } + | '&=' { _PyPegen_augoperator(p, BitAnd) } + | '|=' { _PyPegen_augoperator(p, BitOr) } + | '^=' { _PyPegen_augoperator(p, BitXor) } + | '<<=' { _PyPegen_augoperator(p, LShift) } + | '>>=' { _PyPegen_augoperator(p, RShift) } + | '**=' { 
_PyPegen_augoperator(p, Pow) } + | '//=' { _PyPegen_augoperator(p, FloorDiv) } + +global_stmt[stmt_ty]: 'global' a=','.NAME+ { + _Py_Global(CHECK(_PyPegen_map_names_to_ids(p, a)), EXTRA) } +nonlocal_stmt[stmt_ty]: 'nonlocal' a=','.NAME+ { + _Py_Nonlocal(CHECK(_PyPegen_map_names_to_ids(p, a)), EXTRA) } + +yield_stmt[stmt_ty]: y=yield_expr { _Py_Expr(y, EXTRA) } + +assert_stmt[stmt_ty]: 'assert' a=expression b=[',' z=expression { z }] { _Py_Assert(a, b, EXTRA) } + +del_stmt[stmt_ty]: + | 'del' a=del_targets &(';' | NEWLINE) { _Py_Delete(a, EXTRA) } + | invalid_del_stmt + +import_stmt[stmt_ty]: import_name | import_from +import_name[stmt_ty]: 'import' a=dotted_as_names { _Py_Import(a, EXTRA) } +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from[stmt_ty]: + | 'from' a=('.' | '...')* b=dotted_name 'import' c=import_from_targets { + _Py_ImportFrom(b->v.Name.id, c, _PyPegen_seq_count_dots(a), EXTRA) } + | 'from' a=('.' | '...')+ 'import' b=import_from_targets { + _Py_ImportFrom(NULL, b, _PyPegen_seq_count_dots(a), EXTRA) } +import_from_targets[asdl_seq*]: + | '(' a=import_from_as_names [','] ')' { a } + | import_from_as_names !',' + | '*' { _PyPegen_singleton_seq(p, CHECK(_PyPegen_alias_for_star(p))) } + | invalid_import_from_targets +import_from_as_names[asdl_seq*]: + | a=','.import_from_as_name+ { a } +import_from_as_name[alias_ty]: + | a=NAME b=['as' z=NAME { z }] { _Py_alias(a->v.Name.id, + (b) ? ((expr_ty) b)->v.Name.id : NULL, + p->arena) } +dotted_as_names[asdl_seq*]: + | a=','.dotted_as_name+ { a } +dotted_as_name[alias_ty]: + | a=dotted_name b=['as' z=NAME { z }] { _Py_alias(a->v.Name.id, + (b) ? ((expr_ty) b)->v.Name.id : NULL, + p->arena) } +dotted_name[expr_ty]: + | a=dotted_name '.' 
b=NAME { _PyPegen_join_names_with_dot(p, a, b) } + | NAME + +if_stmt[stmt_ty]: + | 'if' a=named_expression ':' b=block c=elif_stmt { _Py_If(a, b, CHECK(_PyPegen_singleton_seq(p, c)), EXTRA) } + | 'if' a=named_expression ':' b=block c=[else_block] { _Py_If(a, b, c, EXTRA) } +elif_stmt[stmt_ty]: + | 'elif' a=named_expression ':' b=block c=elif_stmt { _Py_If(a, b, CHECK(_PyPegen_singleton_seq(p, c)), EXTRA) } + | 'elif' a=named_expression ':' b=block c=[else_block] { _Py_If(a, b, c, EXTRA) } +else_block[asdl_seq*]: 'else' ':' b=block { b } + +while_stmt[stmt_ty]: + | 'while' a=named_expression ':' b=block c=[else_block] { _Py_While(a, b, c, EXTRA) } + +for_stmt[stmt_ty]: + | 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { + _Py_For(t, ex, b, el, NEW_TYPE_COMMENT(p, tc), EXTRA) } + | ASYNC 'for' t=star_targets 'in' ~ ex=star_expressions ':' tc=[TYPE_COMMENT] b=block el=[else_block] { + CHECK_VERSION(5, "Async for loops are", _Py_AsyncFor(t, ex, b, el, NEW_TYPE_COMMENT(p, tc), EXTRA)) } + | invalid_for_target + +with_stmt[stmt_ty]: + | 'with' '(' a=','.with_item+ ','? ')' ':' b=block { + _Py_With(a, b, NULL, EXTRA) } + | 'with' a=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { + _Py_With(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA) } + | ASYNC 'with' '(' a=','.with_item+ ','? 
')' ':' b=block { + CHECK_VERSION(5, "Async with statements are", _Py_AsyncWith(a, b, NULL, EXTRA)) } + | ASYNC 'with' a=','.with_item+ ':' tc=[TYPE_COMMENT] b=block { + CHECK_VERSION(5, "Async with statements are", _Py_AsyncWith(a, b, NEW_TYPE_COMMENT(p, tc), EXTRA)) } +with_item[withitem_ty]: + | e=expression 'as' t=star_target &(',' | ')' | ':') { _Py_withitem(e, t, p->arena) } + | invalid_with_item + | e=expression { _Py_withitem(e, NULL, p->arena) } + +try_stmt[stmt_ty]: + | 'try' ':' b=block f=finally_block { _Py_Try(b, NULL, NULL, f, EXTRA) } + | 'try' ':' b=block ex=except_block+ el=[else_block] f=[finally_block] { _Py_Try(b, ex, el, f, EXTRA) } +except_block[excepthandler_ty]: + | 'except' e=expression t=['as' z=NAME { z }] ':' b=block { + _Py_ExceptHandler(e, (t) ? ((expr_ty) t)->v.Name.id : NULL, b, EXTRA) } + | 'except' ':' b=block { _Py_ExceptHandler(NULL, NULL, b, EXTRA) } +finally_block[asdl_seq*]: 'finally' ':' a=block { a } + +return_stmt[stmt_ty]: + | 'return' a=[star_expressions] { _Py_Return(a, EXTRA) } + +raise_stmt[stmt_ty]: + | 'raise' a=expression b=['from' z=expression { z }] { _Py_Raise(a, b, EXTRA) } + | 'raise' { _Py_Raise(NULL, NULL, EXTRA) } + +function_def[stmt_ty]: + | d=decorators f=function_def_raw { _PyPegen_function_def_decorators(p, d, f) } + | function_def_raw + +function_def_raw[stmt_ty]: + | 'def' n=NAME '(' params=[params] ')' a=['->' z=expression { z }] ':' tc=[func_type_comment] b=block { + _Py_FunctionDef(n->v.Name.id, + (params) ? params : CHECK(_PyPegen_empty_arguments(p)), + b, NULL, a, NEW_TYPE_COMMENT(p, tc), EXTRA) } + | ASYNC 'def' n=NAME '(' params=[params] ')' a=['->' z=expression { z }] ':' tc=[func_type_comment] b=block { + CHECK_VERSION( + 5, + "Async functions are", + _Py_AsyncFunctionDef(n->v.Name.id, + (params) ? 
params : CHECK(_PyPegen_empty_arguments(p)), + b, NULL, a, NEW_TYPE_COMMENT(p, tc), EXTRA) + ) } +func_type_comment[Token*]: + | NEWLINE t=TYPE_COMMENT &(NEWLINE INDENT) { t } # Must be followed by indented block + | invalid_double_type_comments + | TYPE_COMMENT + +params[arguments_ty]: + | invalid_parameters + | parameters + +parameters[arguments_ty]: + | a=slash_no_default b=param_no_default* c=param_with_default* d=[star_etc] { + _PyPegen_make_arguments(p, a, NULL, b, c, d) } + | a=slash_with_default b=param_with_default* c=[star_etc] { + _PyPegen_make_arguments(p, NULL, a, NULL, b, c) } + | a=param_no_default+ b=param_with_default* c=[star_etc] { + _PyPegen_make_arguments(p, NULL, NULL, a, b, c) } + | a=param_with_default+ b=[star_etc] { _PyPegen_make_arguments(p, NULL, NULL, NULL, a, b)} + | a=star_etc { _PyPegen_make_arguments(p, NULL, NULL, NULL, NULL, a) } + +# Some duplication here because we can't write (',' | &')'), +# which is because we don't support empty alternatives (yet). +# +slash_no_default[asdl_seq*]: + | a=param_no_default+ '/' ',' { a } + | a=param_no_default+ '/' &')' { a } +slash_with_default[SlashWithDefault*]: + | a=param_no_default* b=param_with_default+ '/' ',' { _PyPegen_slash_with_default(p, a, b) } + | a=param_no_default* b=param_with_default+ '/' &')' { _PyPegen_slash_with_default(p, a, b) } + +star_etc[StarEtc*]: + | '*' a=param_no_default b=param_maybe_default* c=[kwds] { + _PyPegen_star_etc(p, a, b, c) } + | '*' ',' b=param_maybe_default+ c=[kwds] { + _PyPegen_star_etc(p, NULL, b, c) } + | a=kwds { _PyPegen_star_etc(p, NULL, NULL, a) } + | invalid_star_etc + +kwds[arg_ty]: '**' a=param_no_default { a } + +# One parameter. This *includes* a following comma and type comment. 
+# +# There are three styles: +# - No default +# - With default +# - Maybe with default +# +# There are two alternative forms of each, to deal with type comments: +# - Ends in a comma followed by an optional type comment +# - No comma, optional type comment, must be followed by close paren +# The latter form is for a final parameter without trailing comma. +# +param_no_default[arg_ty]: + | a=param ',' tc=TYPE_COMMENT? { _PyPegen_add_type_comment_to_arg(p, a, tc) } + | a=param tc=TYPE_COMMENT? &')' { _PyPegen_add_type_comment_to_arg(p, a, tc) } +param_with_default[NameDefaultPair*]: + | a=param c=default ',' tc=TYPE_COMMENT? { _PyPegen_name_default_pair(p, a, c, tc) } + | a=param c=default tc=TYPE_COMMENT? &')' { _PyPegen_name_default_pair(p, a, c, tc) } +param_maybe_default[NameDefaultPair*]: + | a=param c=default? ',' tc=TYPE_COMMENT? { _PyPegen_name_default_pair(p, a, c, tc) } + | a=param c=default? tc=TYPE_COMMENT? &')' { _PyPegen_name_default_pair(p, a, c, tc) } +param[arg_ty]: a=NAME b=annotation? { _Py_arg(a->v.Name.id, b, NULL, EXTRA) } + +annotation[expr_ty]: ':' a=expression { a } +default[expr_ty]: '=' a=expression { a } + +decorators[asdl_seq*]: a=('@' f=named_expression NEWLINE { f })+ { a } + +class_def[stmt_ty]: + | a=decorators b=class_def_raw { _PyPegen_class_def_decorators(p, a, b) } + | class_def_raw +class_def_raw[stmt_ty]: + | 'class' a=NAME b=['(' z=[arguments] ')' { z }] ':' c=block { + _Py_ClassDef(a->v.Name.id, + (b) ? ((expr_ty) b)->v.Call.args : NULL, + (b) ? 
((expr_ty) b)->v.Call.keywords : NULL, + c, NULL, EXTRA) } + +block[asdl_seq*] (memo): + | NEWLINE INDENT a=statements DEDENT { a } + | simple_stmt + | invalid_block + +star_expressions[expr_ty]: + | a=star_expression b=(',' c=star_expression { c })+ [','] { + _Py_Tuple(CHECK(_PyPegen_seq_insert_in_front(p, a, b)), Load, EXTRA) } + | a=star_expression ',' { _Py_Tuple(CHECK(_PyPegen_singleton_seq(p, a)), Load, EXTRA) } + | star_expression +star_expression[expr_ty] (memo): + | '*' a=bitwise_or { _Py_Starred(a, Load, EXTRA) } + | expression + +star_named_expressions[asdl_seq*]: a=','.star_named_expression+ [','] { a } +star_named_expression[expr_ty]: + | '*' a=bitwise_or { _Py_Starred(a, Load, EXTRA) } + | named_expression +named_expression[expr_ty]: + | a=NAME ':=' ~ b=expression { _Py_NamedExpr(CHECK(_PyPegen_set_expr_context(p, a, Store)), b, EXTRA) } + | expression !':=' + | invalid_named_expression + +annotated_rhs[expr_ty]: yield_expr | star_expressions + +expressions[expr_ty]: + | a=expression b=(',' c=expression { c })+ [','] { + _Py_Tuple(CHECK(_PyPegen_seq_insert_in_front(p, a, b)), Load, EXTRA) } + | a=expression ',' { _Py_Tuple(CHECK(_PyPegen_singleton_seq(p, a)), Load, EXTRA) } + | expression +expression[expr_ty] (memo): + | a=disjunction 'if' b=disjunction 'else' c=expression { _Py_IfExp(b, a, c, EXTRA) } + | disjunction + | lambdef + +lambdef[expr_ty]: + | 'lambda' a=[lambda_params] ':' b=expression { _Py_Lambda((a) ? a : CHECK(_PyPegen_empty_arguments(p)), b, EXTRA) } + +lambda_params[arguments_ty]: + | invalid_lambda_parameters + | lambda_parameters + +# lambda_parameters etc. duplicates parameters but without annotations +# or type comments, and if there's no comma after a parameter, we expect +# a colon, not a close parenthesis. (For more, see parameters above.) 
+# +lambda_parameters[arguments_ty]: + | a=lambda_slash_no_default b=lambda_param_no_default* c=lambda_param_with_default* d=[lambda_star_etc] { + _PyPegen_make_arguments(p, a, NULL, b, c, d) } + | a=lambda_slash_with_default b=lambda_param_with_default* c=[lambda_star_etc] { + _PyPegen_make_arguments(p, NULL, a, NULL, b, c) } + | a=lambda_param_no_default+ b=lambda_param_with_default* c=[lambda_star_etc] { + _PyPegen_make_arguments(p, NULL, NULL, a, b, c) } + | a=lambda_param_with_default+ b=[lambda_star_etc] { _PyPegen_make_arguments(p, NULL, NULL, NULL, a, b)} + | a=lambda_star_etc { _PyPegen_make_arguments(p, NULL, NULL, NULL, NULL, a) } + +lambda_slash_no_default[asdl_seq*]: + | a=lambda_param_no_default+ '/' ',' { a } + | a=lambda_param_no_default+ '/' &':' { a } +lambda_slash_with_default[SlashWithDefault*]: + | a=lambda_param_no_default* b=lambda_param_with_default+ '/' ',' { _PyPegen_slash_with_default(p, a, b) } + | a=lambda_param_no_default* b=lambda_param_with_default+ '/' &':' { _PyPegen_slash_with_default(p, a, b) } + +lambda_star_etc[StarEtc*]: + | '*' a=lambda_param_no_default b=lambda_param_maybe_default* c=[lambda_kwds] { + _PyPegen_star_etc(p, a, b, c) } + | '*' ',' b=lambda_param_maybe_default+ c=[lambda_kwds] { + _PyPegen_star_etc(p, NULL, b, c) } + | a=lambda_kwds { _PyPegen_star_etc(p, NULL, NULL, a) } + | invalid_lambda_star_etc + +lambda_kwds[arg_ty]: '**' a=lambda_param_no_default { a } + +lambda_param_no_default[arg_ty]: + | a=lambda_param ',' { a } + | a=lambda_param &':' { a } +lambda_param_with_default[NameDefaultPair*]: + | a=lambda_param c=default ',' { _PyPegen_name_default_pair(p, a, c, NULL) } + | a=lambda_param c=default &':' { _PyPegen_name_default_pair(p, a, c, NULL) } +lambda_param_maybe_default[NameDefaultPair*]: + | a=lambda_param c=default? ',' { _PyPegen_name_default_pair(p, a, c, NULL) } + | a=lambda_param c=default? 
&':' { _PyPegen_name_default_pair(p, a, c, NULL) } +lambda_param[arg_ty]: a=NAME { _Py_arg(a->v.Name.id, NULL, NULL, EXTRA) } + +disjunction[expr_ty] (memo): + | a=conjunction b=('or' c=conjunction { c })+ { _Py_BoolOp( + Or, + CHECK(_PyPegen_seq_insert_in_front(p, a, b)), + EXTRA) } + | conjunction +conjunction[expr_ty] (memo): + | a=inversion b=('and' c=inversion { c })+ { _Py_BoolOp( + And, + CHECK(_PyPegen_seq_insert_in_front(p, a, b)), + EXTRA) } + | inversion +inversion[expr_ty] (memo): + | 'not' a=inversion { _Py_UnaryOp(Not, a, EXTRA) } + | comparison +comparison[expr_ty]: + | a=bitwise_or b=compare_op_bitwise_or_pair+ { + _Py_Compare(a, CHECK(_PyPegen_get_cmpops(p, b)), CHECK(_PyPegen_get_exprs(p, b)), EXTRA) } + | bitwise_or +compare_op_bitwise_or_pair[CmpopExprPair*]: + | eq_bitwise_or + | noteq_bitwise_or + | lte_bitwise_or + | lt_bitwise_or + | gte_bitwise_or + | gt_bitwise_or + | notin_bitwise_or + | in_bitwise_or + | isnot_bitwise_or + | is_bitwise_or +eq_bitwise_or[CmpopExprPair*]: '==' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Eq, a) } +noteq_bitwise_or[CmpopExprPair*]: + | (tok='!=' { _PyPegen_check_barry_as_flufl(p, tok) ? 
NULL : tok}) a=bitwise_or {_PyPegen_cmpop_expr_pair(p, NotEq, a) } +lte_bitwise_or[CmpopExprPair*]: '<=' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, LtE, a) } +lt_bitwise_or[CmpopExprPair*]: '<' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Lt, a) } +gte_bitwise_or[CmpopExprPair*]: '>=' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, GtE, a) } +gt_bitwise_or[CmpopExprPair*]: '>' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Gt, a) } +notin_bitwise_or[CmpopExprPair*]: 'not' 'in' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, NotIn, a) } +in_bitwise_or[CmpopExprPair*]: 'in' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, In, a) } +isnot_bitwise_or[CmpopExprPair*]: 'is' 'not' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, IsNot, a) } +is_bitwise_or[CmpopExprPair*]: 'is' a=bitwise_or { _PyPegen_cmpop_expr_pair(p, Is, a) } + +bitwise_or[expr_ty]: + | a=bitwise_or '|' b=bitwise_xor { _Py_BinOp(a, BitOr, b, EXTRA) } + | bitwise_xor +bitwise_xor[expr_ty]: + | a=bitwise_xor '^' b=bitwise_and { _Py_BinOp(a, BitXor, b, EXTRA) } + | bitwise_and +bitwise_and[expr_ty]: + | a=bitwise_and '&' b=shift_expr { _Py_BinOp(a, BitAnd, b, EXTRA) } + | shift_expr +shift_expr[expr_ty]: + | a=shift_expr '<<' b=sum { _Py_BinOp(a, LShift, b, EXTRA) } + | a=shift_expr '>>' b=sum { _Py_BinOp(a, RShift, b, EXTRA) } + | sum + +sum[expr_ty]: + | a=sum '+' b=term { _Py_BinOp(a, Add, b, EXTRA) } + | a=sum '-' b=term { _Py_BinOp(a, Sub, b, EXTRA) } + | term +term[expr_ty]: + | a=term '*' b=factor { _Py_BinOp(a, Mult, b, EXTRA) } + | a=term '/' b=factor { _Py_BinOp(a, Div, b, EXTRA) } + | a=term '//' b=factor { _Py_BinOp(a, FloorDiv, b, EXTRA) } + | a=term '%' b=factor { _Py_BinOp(a, Mod, b, EXTRA) } + | a=term '@' b=factor { CHECK_VERSION(5, "The '@' operator is", _Py_BinOp(a, MatMult, b, EXTRA)) } + | factor +factor[expr_ty] (memo): + | '+' a=factor { _Py_UnaryOp(UAdd, a, EXTRA) } + | '-' a=factor { _Py_UnaryOp(USub, a, EXTRA) } + | '~' a=factor { _Py_UnaryOp(Invert, a, EXTRA) } + | power +power[expr_ty]: + | a=await_primary 
'**' b=factor { _Py_BinOp(a, Pow, b, EXTRA) } + | await_primary +await_primary[expr_ty] (memo): + | AWAIT a=primary { CHECK_VERSION(5, "Await expressions are", _Py_Await(a, EXTRA)) } + | primary +primary[expr_ty]: + | invalid_primary # must be before 'primary genexp' because of invalid_genexp + | a=primary '.' b=NAME { _Py_Attribute(a, b->v.Name.id, Load, EXTRA) } + | a=primary b=genexp { _Py_Call(a, CHECK(_PyPegen_singleton_seq(p, b)), NULL, EXTRA) } + | a=primary '(' b=[arguments] ')' { + _Py_Call(a, + (b) ? ((expr_ty) b)->v.Call.args : NULL, + (b) ? ((expr_ty) b)->v.Call.keywords : NULL, + EXTRA) } + | a=primary '[' b=slices ']' { _Py_Subscript(a, b, Load, EXTRA) } + | atom + +slices[expr_ty]: + | a=slice !',' { a } + | a=','.slice+ [','] { _Py_Tuple(a, Load, EXTRA) } +slice[expr_ty]: + | a=[expression] ':' b=[expression] c=[':' d=[expression] { d }] { _Py_Slice(a, b, c, EXTRA) } + | a=expression { a } +atom[expr_ty]: + | NAME + | 'True' { _Py_Constant(Py_True, NULL, EXTRA) } + | 'False' { _Py_Constant(Py_False, NULL, EXTRA) } + | 'None' { _Py_Constant(Py_None, NULL, EXTRA) } + | '__peg_parser__' { RAISE_SYNTAX_ERROR("You found it!") } + | &STRING strings + | NUMBER + | &'(' (tuple | group | genexp) + | &'[' (list | listcomp) + | &'{' (dict | set | dictcomp | setcomp) + | '...' 
{ _Py_Constant(Py_Ellipsis, NULL, EXTRA) } + +strings[expr_ty] (memo): a=STRING+ { _PyPegen_concatenate_strings(p, a) } +list[expr_ty]: + | '[' a=[star_named_expressions] ']' { _Py_List(a, Load, EXTRA) } +listcomp[expr_ty]: + | '[' a=named_expression ~ b=for_if_clauses ']' { _Py_ListComp(a, b, EXTRA) } + | invalid_comprehension +tuple[expr_ty]: + | '(' a=[y=star_named_expression ',' z=[star_named_expressions] { _PyPegen_seq_insert_in_front(p, y, z) } ] ')' { + _Py_Tuple(a, Load, EXTRA) } +group[expr_ty]: + | '(' a=(yield_expr | named_expression) ')' { a } + | invalid_group +genexp[expr_ty]: + | '(' a=named_expression ~ b=for_if_clauses ')' { _Py_GeneratorExp(a, b, EXTRA) } + | invalid_comprehension +set[expr_ty]: '{' a=star_named_expressions '}' { _Py_Set(a, EXTRA) } +setcomp[expr_ty]: + | '{' a=named_expression ~ b=for_if_clauses '}' { _Py_SetComp(a, b, EXTRA) } + | invalid_comprehension +dict[expr_ty]: + | '{' a=[double_starred_kvpairs] '}' { + _Py_Dict(CHECK(_PyPegen_get_keys(p, a)), CHECK(_PyPegen_get_values(p, a)), EXTRA) } +dictcomp[expr_ty]: + | '{' a=kvpair b=for_if_clauses '}' { _Py_DictComp(a->key, a->value, b, EXTRA) } + | invalid_dict_comprehension +double_starred_kvpairs[asdl_seq*]: a=','.double_starred_kvpair+ [','] { a } +double_starred_kvpair[KeyValuePair*]: + | '**' a=bitwise_or { _PyPegen_key_value_pair(p, NULL, a) } + | kvpair +kvpair[KeyValuePair*]: a=expression ':' b=expression { _PyPegen_key_value_pair(p, a, b) } +for_if_clauses[asdl_seq*]: + | for_if_clause+ +for_if_clause[comprehension_ty]: + | ASYNC 'for' a=star_targets 'in' ~ b=disjunction c=('if' z=disjunction { z })* { + CHECK_VERSION(6, "Async comprehensions are", _Py_comprehension(a, b, c, 1, p->arena)) } + | 'for' a=star_targets 'in' ~ b=disjunction c=('if' z=disjunction { z })* { + _Py_comprehension(a, b, c, 0, p->arena) } + | invalid_for_target + +yield_expr[expr_ty]: + | 'yield' 'from' a=expression { _Py_YieldFrom(a, EXTRA) } + | 'yield' a=[star_expressions] { _Py_Yield(a, EXTRA) } 
+ +arguments[expr_ty] (memo): + | a=args [','] &')' { a } + | invalid_arguments +args[expr_ty]: + | a=','.(starred_expression | named_expression !'=')+ b=[',' k=kwargs {k}] { _PyPegen_collect_call_seqs(p, a, b, EXTRA) } + | a=kwargs { _Py_Call(_PyPegen_dummy_name(p), + CHECK_NULL_ALLOWED(_PyPegen_seq_extract_starred_exprs(p, a)), + CHECK_NULL_ALLOWED(_PyPegen_seq_delete_starred_exprs(p, a)), + EXTRA) } +kwargs[asdl_seq*]: + | a=','.kwarg_or_starred+ ',' b=','.kwarg_or_double_starred+ { _PyPegen_join_sequences(p, a, b) } + | ','.kwarg_or_starred+ + | ','.kwarg_or_double_starred+ +starred_expression[expr_ty]: + | '*' a=expression { _Py_Starred(a, Load, EXTRA) } +kwarg_or_starred[KeywordOrStarred*]: + | a=NAME '=' b=expression { + _PyPegen_keyword_or_starred(p, CHECK(_Py_keyword(a->v.Name.id, b, EXTRA)), 1) } + | a=starred_expression { _PyPegen_keyword_or_starred(p, a, 0) } + | invalid_kwarg +kwarg_or_double_starred[KeywordOrStarred*]: + | a=NAME '=' b=expression { + _PyPegen_keyword_or_starred(p, CHECK(_Py_keyword(a->v.Name.id, b, EXTRA)), 1) } + | '**' a=expression { _PyPegen_keyword_or_starred(p, CHECK(_Py_keyword(NULL, a, EXTRA)), 1) } + | invalid_kwarg + +# NOTE: star_targets may contain *bitwise_or, targets may not. +star_targets[expr_ty]: + | a=star_target !',' { a } + | a=star_target b=(',' c=star_target { c })* [','] { + _Py_Tuple(CHECK(_PyPegen_seq_insert_in_front(p, a, b)), Store, EXTRA) } +star_targets_list_seq[asdl_seq*]: a=','.star_target+ [','] { a } +star_targets_tuple_seq[asdl_seq*]: + | a=star_target b=(',' c=star_target { c })+ [','] { _PyPegen_seq_insert_in_front(p, a, b) } + | a=star_target ',' { _PyPegen_singleton_seq(p, a) } +star_target[expr_ty] (memo): + | '*' a=(!'*' star_target) { + _Py_Starred(CHECK(_PyPegen_set_expr_context(p, a, Store)), Store, EXTRA) } + | target_with_star_atom +target_with_star_atom[expr_ty] (memo): + | a=t_primary '.' 
b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Store, EXTRA) } + | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Store, EXTRA) } + | star_atom +star_atom[expr_ty]: + | a=NAME { _PyPegen_set_expr_context(p, a, Store) } + | '(' a=target_with_star_atom ')' { _PyPegen_set_expr_context(p, a, Store) } + | '(' a=[star_targets_tuple_seq] ')' { _Py_Tuple(a, Store, EXTRA) } + | '[' a=[star_targets_list_seq] ']' { _Py_List(a, Store, EXTRA) } + +single_target[expr_ty]: + | single_subscript_attribute_target + | a=NAME { _PyPegen_set_expr_context(p, a, Store) } + | '(' a=single_target ')' { a } +single_subscript_attribute_target[expr_ty]: + | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Store, EXTRA) } + | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Store, EXTRA) } + +del_targets[asdl_seq*]: a=','.del_target+ [','] { a } +del_target[expr_ty] (memo): + | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Del, EXTRA) } + | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Del, EXTRA) } + | del_t_atom +del_t_atom[expr_ty]: + | a=NAME { _PyPegen_set_expr_context(p, a, Del) } + | '(' a=del_target ')' { _PyPegen_set_expr_context(p, a, Del) } + | '(' a=[del_targets] ')' { _Py_Tuple(a, Del, EXTRA) } + | '[' a=[del_targets] ']' { _Py_List(a, Del, EXTRA) } + +targets[asdl_seq*]: a=','.target+ [','] { a } +target[expr_ty] (memo): + | a=t_primary '.' b=NAME !t_lookahead { _Py_Attribute(a, b->v.Name.id, Store, EXTRA) } + | a=t_primary '[' b=slices ']' !t_lookahead { _Py_Subscript(a, b, Store, EXTRA) } + | t_atom +t_primary[expr_ty]: + | a=t_primary '.' b=NAME &t_lookahead { _Py_Attribute(a, b->v.Name.id, Load, EXTRA) } + | a=t_primary '[' b=slices ']' &t_lookahead { _Py_Subscript(a, b, Load, EXTRA) } + | a=t_primary b=genexp &t_lookahead { _Py_Call(a, CHECK(_PyPegen_singleton_seq(p, b)), NULL, EXTRA) } + | a=t_primary '(' b=[arguments] ')' &t_lookahead { + _Py_Call(a, + (b) ? 
((expr_ty) b)->v.Call.args : NULL, + (b) ? ((expr_ty) b)->v.Call.keywords : NULL, + EXTRA) } + | a=atom &t_lookahead { a } +t_lookahead: '(' | '[' | '.' +t_atom[expr_ty]: + | a=NAME { _PyPegen_set_expr_context(p, a, Store) } + | '(' a=target ')' { _PyPegen_set_expr_context(p, a, Store) } + | '(' b=[targets] ')' { _Py_Tuple(b, Store, EXTRA) } + | '[' b=[targets] ']' { _Py_List(b, Store, EXTRA) } + + +# From here on, there are rules for invalid syntax with specialised error messages +invalid_arguments: + | args ',' '*' { RAISE_SYNTAX_ERROR("iterable argument unpacking follows keyword argument unpacking") } + | a=expression for_if_clauses ',' [args | expression for_if_clauses] { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "Generator expression must be parenthesized") } + | a=args for_if_clauses { _PyPegen_nonparen_genexp_in_call(p, a) } + | args ',' a=expression for_if_clauses { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "Generator expression must be parenthesized") } + | a=args ',' args { _PyPegen_arguments_parsing_error(p, a) } +invalid_kwarg: + | !(NAME '=') a=expression b='=' { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION( + a, "expression cannot contain assignment, perhaps you meant \"==\"?") } +invalid_named_expression: + | a=expression ':=' expression { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION( + a, "cannot use assignment expressions with %s", _PyPegen_get_expr_name(a)) } +invalid_assignment: + | a=invalid_ann_assign_target ':' expression { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION( + a, + "only single target (not %s) can be annotated", + _PyPegen_get_expr_name(a) + )} + | a=star_named_expression ',' star_named_expressions* ':' expression { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "only single target (not tuple) can be annotated") } + | a=expression ':' expression { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "illegal target for annotation") } + | (star_targets '=')* a=star_expressions '=' { + RAISE_SYNTAX_ERROR_INVALID_TARGET(STAR_TARGETS, a) } + | (star_targets '=')* a=yield_expr '=' { 
RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "assignment to yield expression not possible") } + | a=star_expressions augassign (yield_expr | star_expressions) { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION( + a, + "'%s' is an illegal expression for augmented assignment", + _PyPegen_get_expr_name(a) + )} +invalid_ann_assign_target[expr_ty]: + | list + | tuple + | '(' a=invalid_ann_assign_target ')' { a } +invalid_del_stmt: + | 'del' a=star_expressions { + RAISE_SYNTAX_ERROR_INVALID_TARGET(DEL_TARGETS, a) } +invalid_block: + | NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block") } +invalid_primary: + | primary a='{' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "invalid syntax") } +invalid_comprehension: + | ('[' | '(' | '{') a=starred_expression for_if_clauses { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "iterable unpacking cannot be used in comprehension") } +invalid_dict_comprehension: + | '{' a='**' bitwise_or for_if_clauses '}' { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "dict unpacking cannot be used in dict comprehension") } +invalid_parameters: + | param_no_default* (slash_with_default | param_with_default+) param_no_default { + RAISE_SYNTAX_ERROR("non-default argument follows default argument") } +invalid_lambda_parameters: + | lambda_param_no_default* (lambda_slash_with_default | lambda_param_with_default+) lambda_param_no_default { + RAISE_SYNTAX_ERROR("non-default argument follows default argument") } +invalid_star_etc: + | '*' (')' | ',' (')' | '**')) { RAISE_SYNTAX_ERROR("named arguments must follow bare *") } + | '*' ',' TYPE_COMMENT { RAISE_SYNTAX_ERROR("bare * has associated type comment") } +invalid_lambda_star_etc: + | '*' (':' | ',' (':' | '**')) { RAISE_SYNTAX_ERROR("named arguments must follow bare *") } +invalid_double_type_comments: + | TYPE_COMMENT NEWLINE TYPE_COMMENT NEWLINE INDENT { + RAISE_SYNTAX_ERROR("Cannot have two type comments on def") } +invalid_with_item: + | expression 'as' a=expression { + RAISE_SYNTAX_ERROR_INVALID_TARGET(STAR_TARGETS, a) 
} + +invalid_for_target: + | ASYNC? 'for' a=star_expressions { + RAISE_SYNTAX_ERROR_INVALID_TARGET(FOR_TARGETS, a) } + +invalid_group: + | '(' a=starred_expression ')' { + RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "can't use starred expression here") } +invalid_import_from_targets: + | import_from_as_names ',' { + RAISE_SYNTAX_ERROR("trailing comma not allowed without surrounding parentheses") } \ No newline at end of file diff --git a/native/libcst/LICENSE b/native/libcst/LICENSE new file mode 100644 index 00000000..5594616f --- /dev/null +++ b/native/libcst/LICENSE @@ -0,0 +1,102 @@ +All contributions towards LibCST are MIT licensed. + +Some Python files have been derived from the standard library and are therefore +PSF licensed. Modifications on these files are dual licensed (both MIT and +PSF). These files are: + +- libcst/_parser/base_parser.py +- libcst/_parser/parso/utils.py +- libcst/_parser/parso/pgen2/generator.py +- libcst/_parser/parso/pgen2/grammar_parser.py +- libcst/_parser/parso/python/py_token.py +- libcst/_parser/parso/python/tokenize.py +- libcst/_parser/parso/tests/test_fstring.py +- libcst/_parser/parso/tests/test_tokenize.py +- libcst/_parser/parso/tests/test_utils.py +- native/libcst/src/tokenizer/core/mod.rs +- native/libcst/src/tokenizer/core/string_types.rs + +Some Python files have been taken from dataclasses and are therefore Apache +licensed. Modifications on these files are licensed under Apache 2.0 license. +These files are: + +- libcst/_add_slots.py + +------------------------------------------------------------------------------- + +MIT License + +Copyright (c) Meta Platforms, Inc. and affiliates. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------------------------------------------------------------------- + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. 
This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + +------------------------------------------------------------------------------- + +APACHE LICENSE, VERSION 2.0 + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/native/libcst/README.md b/native/libcst/README.md new file mode 100644 index 00000000..09fe8f99 --- /dev/null +++ b/native/libcst/README.md @@ -0,0 +1,122 @@ +# libcst/native + +A native extension to enable parsing of new Python grammar in LibCST. + +The extension is written in Rust, and exposed to Python using [PyO3](https://pyo3.rs/). +This is packaged together with libcst, and can be imported from `libcst.native`. By default +the LibCST APIs use this module for all parsing. + +Later on, the parser library might be packaged separately as +[a Rust crate](https://crates.io). Pull requests towards this are much appreciated. + +## Goals + +1. Adopt the CPython grammar definition as closely as possible to reduce maintenance + burden. This means using a PEG parser. +2. Feature-parity with the pure-python LibCST parser: the API should be easy to use from + Python, support parsing with a target version, bytes and strings as inputs, etc. +3. [future] Performance. The aspirational goal is to be within 2x CPython performance, + which would enable LibCST to be used in interactive use cases (think IDEs). +4. [future] Error recovery. The parser should be able to handle partially complete + documents, returning a CST for the syntactically correct parts, and a list of errors + found. 
+ +## Structure + +The extension is organized into two rust crates: `libcst_derive` contains some macros to +facilitate various features of CST nodes, and `libcst` contains the `parser` itself +(including the Python grammar), a `tokenizer` implementation by @bgw, and a very basic +representation of CST `nodes`. Parsing is done by +1. **tokenizing** the input utf-8 string (bytes are not supported at the Rust layer, + they are converted to utf-8 strings by the python wrapper) +2. running the **PEG parser** on the tokenized input, which also captures certain anchor + tokens in the resulting syntax tree +3. using the anchor tokens to **inflate** the syntax tree into a proper CST + +These steps are wrapped into a high-level `parse_module` API +[here](https://github.com/Instagram/LibCST/blob/main/native/libcst/src/lib.rs#L43), +along with `parse_statement` and `parse_expression` functions which all just accept the +input string and an optional encoding. + +These Rust functions are exposed to Python +[here](https://github.com/Instagram/LibCST/blob/main/native/libcst/src/py.rs) using the +excellent [PyO3](https://pyo3.rs/) library, plus an `IntoPy` trait which is mostly +implemented via a macro in `libcst_derive`. + + +## Hacking + +### Nodes +All CST nodes are marked with the `#[cst_node]` proc macro, which duplicates the node types; for a node named `Foo`, there's: + +- `DeflatedFoo`, which is the output of the parsing phase and isn't exposed through the + API of the crate. 
+ - it has two lifetime parameters: `'r` (or `'input` in the grammar) is the lifetime of + `Token` references, and `'a` is the lifetime of `str` slices from the original input + - `TokenRef` fields are contained here, while whitespace fields aren't + - if there aren't any fields that refer to other CST nodes or `TokenRef`s, there's an + extra (private) `_phantom` field that "contains" the two lifetime parameters (this + is to make the type parameters of all `DeflatedFoo` types uniform) + - it implements the `Inflate` trait, which converts `DeflatedFoo` into `Foo` +- `Foo`, which is what's publicly exposed in the crate and is the output of `Inflate`ing `DeflatedFoo`. + - it only retains the second (`'a`) lifetime parameter of `DeflatedFoo` to refer back to slices of the original input string + - whitespace fields are contained here, but `TokenRef`s aren't + - `IntoPy` is implemented for it (assuming the `py` crate feature is enabled), which contains code to translate `Foo` back into a Python object; hence, the fields on `Foo` match the Python CST node implementations (barring fields marked with `#[skip_py]`) + +### Grammar + +The grammar is mostly a straightforward translation from the [CPython +grammar](https://github.com/python/cpython/blob/main/Grammar/python.gram), with some +exceptions: + +* The output of grammar rules are deflated CST nodes that capture the AST plus + additional anchor token references used for whitespace parsing later on. +* Rules in the grammar must be strongly typed, as enforced by the Rust compiler. The + CPython grammar rules are a bit more loosely-typed in comparison. +* Some features in the CPython peg parser are not supported by rust-peg: keywords, + mutually recursive rules, special `invalid_` rules, the `~` operator, terminating the + parser early. + +The PEG parser is run on a `Vec` of `Token`s (more precisely `&'input Vec>`), +and tries its best to avoid allocating any strings, working only with references. 
As +such, the output nodes don't own any strings, but refer to slices of the original input +(hence the `'input, 'a` lifetime parameters on almost all nodes). + +### Whitespace parsing + +The `Inflate` trait is responsible for taking a "deflated", skeleton CST node, and +parsing out the relevant whitespace from the anchor tokens to produce an "inflated" +(normal) CST node. In addition to the deflated node, inflation requires a whitespace +config object which contains global information required for certain aspects of +whitespace parsing, like the default indentation. + +Inflation consumes the deflated node, while mutating the tokens referenced by it. This +is important to make sure whitespace is only ever assigned to at most one CST node. The +`Inflate` trait implementation needs to ensure that all whitespace is assigned to a CST +node; this is generally verified using roundtrip tests (i.e. parsing code and then +generating it back to then assert the original and generated are byte-by-byte equal). + +The general convention is that the top-most possible node owns a certain piece of +whitespace, which should be straightforward to achieve in a top-down parser like +`Inflate`. In cases where whitespace is shared between sibling nodes, usually the +leftmost node owns the whitespace except in the case of trailing commas and closing +parentheses, where the latter owns the whitespace (for backwards compatibility with the +pure python parser). See the implementation of `inflate_element` for how this is done. + +### Tests + +In addition to running the python test suite, you can run some tests written in rust +with + +``` +cd native +cargo test +``` + +These include unit and roundtrip tests. + +Additionally, some benchmarks can be run on x86-based architectures using `cargo bench`. + +### Code Formatting + +Use `cargo fmt` to format your code. 
diff --git a/native/libcst/benches/parser_benchmark.rs b/native/libcst/benches/parser_benchmark.rs new file mode 100644 index 00000000..4987022a --- /dev/null +++ b/native/libcst/benches/parser_benchmark.rs @@ -0,0 +1,170 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use std::{ + path::{Component, PathBuf}, + time::Duration, +}; + +use criterion::{ + black_box, criterion_group, criterion_main, measurement::Measurement, BatchSize, BenchmarkId, + Criterion, Throughput, +}; +use itertools::Itertools; +use rayon::prelude::*; + +use libcst_native::{ + parse_module, parse_tokens_without_whitespace, tokenize, Codegen, Config, Inflate, +}; + +#[cfg(not(windows))] +const NEWLINE: &str = "\n"; +#[cfg(windows)] +const NEWLINE: &str = "\r\n"; + +fn load_all_fixtures_vec() -> Vec { + let mut path = PathBuf::from(file!()); + path.pop(); + path.pop(); + path = path + .components() + .skip(1) + .chain( + vec!["tests".as_ref(), "fixtures".as_ref()] + .into_iter() + .map(Component::Normal), + ) + .collect(); + + path.read_dir() + .expect("read_dir") + .into_iter() + .map(|file| { + let path = file.unwrap().path(); + std::fs::read_to_string(&path).expect("reading_file") + }) + .collect() +} + +fn load_all_fixtures() -> String { + load_all_fixtures_vec().join(NEWLINE) +} + +pub fn inflate_benchmarks(c: &mut Criterion) { + let fixture = load_all_fixtures(); + let tokens = tokenize(fixture.as_str()).expect("tokenize failed"); + let tokvec = tokens.clone().into(); + let mut group = c.benchmark_group("inflate"); + group.bench_function("all", |b| { + b.iter_batched( + || { + let conf = Config::new(fixture.as_str(), &tokens); + let m = parse_tokens_without_whitespace(&tokvec, fixture.as_str(), None) + .expect("parse failed"); + (conf, m) + }, + |(conf, m)| black_box(m.inflate(&conf)), + BatchSize::SmallInput, + ) + }); + group.finish(); +} + +pub 
fn parser_benchmarks(c: &mut Criterion) { + let fixture = load_all_fixtures(); + let mut group = c.benchmark_group("parse"); + group.measurement_time(Duration::from_secs(15)); + group.bench_function("all", |b| { + b.iter_batched( + || tokenize(fixture.as_str()).expect("tokenize failed").into(), + |tokens| { + black_box(drop(parse_tokens_without_whitespace( + &tokens, + fixture.as_str(), + None, + ))) + }, + BatchSize::SmallInput, + ) + }); + group.finish(); +} + +pub fn codegen_benchmarks(c: &mut Criterion) { + let input = load_all_fixtures(); + let m = parse_module(input.as_str(), None).expect("parse failed"); + let mut group = c.benchmark_group("codegen"); + group.bench_function("all", |b| { + b.iter(|| { + let mut state = Default::default(); + #[allow(clippy::unit_arg)] + black_box(m.codegen(&mut state)); + }) + }); + group.finish(); +} + +pub fn tokenize_benchmarks(c: &mut Criterion) { + let input = load_all_fixtures(); + let mut group = c.benchmark_group("tokenize"); + group.measurement_time(Duration::from_secs(15)); + group.bench_function("all", |b| b.iter(|| black_box(tokenize(input.as_str())))); + group.finish(); +} + +pub fn parse_into_cst_benchmarks(c: &mut Criterion) { + let fixture = load_all_fixtures(); + let mut group = c.benchmark_group("parse_into_cst"); + group.measurement_time(Duration::from_secs(15)); + group.bench_function("all", |b| { + b.iter(|| black_box(parse_module(&fixture, None))) + }); + group.finish(); +} + +pub fn parse_into_cst_multithreaded_benchmarks( + c: &mut Criterion, +) where + ::Value: Send, +{ + let fixtures = load_all_fixtures_vec(); + let mut group = c.benchmark_group("parse_into_cst_parallel"); + group.measurement_time(Duration::from_secs(15)); + group.warm_up_time(Duration::from_secs(5)); + + for thread_count in 1..10 { + let expanded_fixtures = (0..thread_count) + .flat_map(|_| fixtures.clone()) + .collect_vec(); + group.throughput(Throughput::Elements(expanded_fixtures.len() as u64)); + group.bench_with_input( + 
BenchmarkId::from_parameter(thread_count), + &thread_count, + |b, thread_count| { + let thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(*thread_count) + .build() + .unwrap(); + thread_pool.install(|| { + b.iter_with_large_drop(|| { + expanded_fixtures + .par_iter() + .map(|contents| black_box(parse_module(&contents, None))) + .collect::>() + }); + }); + }, + ); + } + + group.finish(); +} + +criterion_group!( + name=benches; + config=Criterion::default(); + targets=parser_benchmarks, codegen_benchmarks, inflate_benchmarks, tokenize_benchmarks, parse_into_cst_benchmarks, parse_into_cst_multithreaded_benchmarks +); +criterion_main!(benches); diff --git a/native/libcst/src/bin.rs b/native/libcst/src/bin.rs new file mode 100644 index 00000000..1517cce4 --- /dev/null +++ b/native/libcst/src/bin.rs @@ -0,0 +1,33 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use libcst_native::*; +use std::{ + env, + io::{self, Read}, + process::exit, +}; + +pub fn main() { + let mut str = std::string::String::new(); + io::stdin().read_to_string(&mut str).unwrap(); + match parse_module(str.as_ref(), None) { + Err(e) => { + eprintln!("{}", prettify_error(e, "stdin")); + exit(1); + } + Ok(m) => { + let first_arg = env::args().nth(1).unwrap_or_else(|| "".to_string()); + if first_arg == "-d" { + println!("{:#?}", m); + } + if first_arg != "-n" { + let mut state = Default::default(); + m.codegen(&mut state); + print!("{}", state.to_string()); + } + } + }; +} diff --git a/native/libcst/src/lib.rs b/native/libcst/src/lib.rs new file mode 100644 index 00000000..113d5d02 --- /dev/null +++ b/native/libcst/src/lib.rs @@ -0,0 +1,213 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use std::cmp::{max, min}; + +pub mod tokenizer; + +pub use tokenizer::whitespace_parser::Config; +use tokenizer::{whitespace_parser, TokConfig, Token, TokenIterator}; + +mod nodes; +use nodes::deflated::Module as DeflatedModule; +pub use nodes::*; + +mod parser; +use parser::{ParserError, Result, TokVec}; + +#[cfg(feature = "py")] +pub mod py; + +pub fn tokenize(text: &str) -> Result> { + let iter = TokenIterator::new( + text, + &TokConfig { + async_hacks: false, + split_ftstring: true, + }, + ); + + iter.collect::, _>>() + .map_err(|err| ParserError::TokenizerError(err, text)) +} + +pub fn parse_module<'a>( + mut module_text: &'a str, + encoding: Option<&str>, +) -> Result<'a, Module<'a>> { + // Strip UTF-8 BOM + if let Some(stripped) = module_text.strip_prefix('\u{feff}') { + module_text = stripped; + } + let tokens = tokenize(module_text)?; + let conf = whitespace_parser::Config::new(module_text, &tokens); + let tokvec = tokens.into(); + let m = parse_tokens_without_whitespace(&tokvec, module_text, encoding)?; + Ok(m.inflate(&conf)?) +} + +pub fn parse_tokens_without_whitespace<'r, 'a>( + tokens: &'r TokVec<'a>, + module_text: &'a str, + encoding: Option<&str>, +) -> Result<'a, DeflatedModule<'r, 'a>> { + let m = parser::python::file(tokens, module_text, encoding) + .map_err(|err| ParserError::ParserError(err, module_text))?; + Ok(m) +} + +pub fn parse_statement(text: &str) -> Result { + let tokens = tokenize(text)?; + let conf = whitespace_parser::Config::new(text, &tokens); + let tokvec = tokens.into(); + let stm = parser::python::statement_input(&tokvec, text) + .map_err(|err| ParserError::ParserError(err, text))?; + Ok(stm.inflate(&conf)?) +} + +pub fn parse_expression(text: &str) -> Result { + let tokens = tokenize(text)?; + let conf = whitespace_parser::Config::new(text, &tokens); + let tokvec = tokens.into(); + let expr = parser::python::expression_input(&tokvec, text) + .map_err(|err| ParserError::ParserError(err, text))?; + Ok(expr.inflate(&conf)?) 
+} + +// n starts from 1 +fn bol_offset(source: &str, n: i32) -> usize { + if n <= 1 { + return 0; + } + source + .match_indices('\n') + .nth((n - 2) as usize) + .map(|(index, _)| index + 1) + .unwrap_or_else(|| source.len()) +} + +pub fn prettify_error(err: ParserError, label: &str) -> std::string::String { + match err { + ParserError::ParserError(e, module_text) => { + use annotate_snippets::{Level, Renderer, Snippet}; + + let loc = e.location; + let context = 1; + let line_start = max( + 1, + loc.start_pos + .line + .checked_sub(context as usize) + .unwrap_or(1), + ); + let start_offset = bol_offset(module_text, loc.start_pos.line as i32 - context); + let end_offset = bol_offset(module_text, loc.end_pos.line as i32 + context + 1); + let source = &module_text[start_offset..end_offset]; + let start = loc.start_pos.offset - start_offset; + let end = loc.end_pos.offset - start_offset; + let end = if start == end { + min(end + 1, end_offset - start_offset + 1) + } else { + end + }; + Renderer::styled() + .render( + Level::Error.title(label).snippet( + Snippet::source(source) + .line_start(line_start) + .fold(false) + .annotations(vec![Level::Error.span(start..end).label(&format!( + "expected {} {} -> {}", + e.expected, loc.start_pos, loc.end_pos + ))]), + ), + ) + .to_string() + } + e => format!("Parse error for {}: {}", label, e), + } +} + +#[cfg(test)] +mod test { + use super::*; + use tokenizer::TokError; + + #[test] + fn test_simple() { + let n = parse_module("1_", None); + assert_eq!( + n.err().unwrap(), + ParserError::TokenizerError(TokError::BadDecimal, "1_") + ); + } + + #[test] + fn test_bare_minimum_funcdef() { + parse_module("def f(): ...", None).expect("parse error"); + } + + #[test] + fn test_funcdef_params() { + parse_module("def g(a, b): ...", None).expect("parse error"); + } + + #[test] + fn test_single_statement_with_no_newline() { + for src in &[ + "(\n \\\n)", + "(\n \\\n)", + "(\n '''\n''')", + "del _", + "if _:\n '''\n)'''", + "if _:\n 
('''\n''')", + "if _:\n '''\n '''", + "if _:\n '''\n ''' ", + ] { + parse_module(src, None).unwrap_or_else(|e| panic!("'{}' doesn't parse: {}", src, e)); + } + } + + #[test] + fn bol_offset_first_line() { + assert_eq!(0, bol_offset("hello", 1)); + assert_eq!(0, bol_offset("hello", 0)); + assert_eq!(0, bol_offset("hello\nhello", 1)); + assert_eq!(0, bol_offset("hello\nhello", 0)); + } + + #[test] + fn bol_offset_second_line() { + assert_eq!(5, bol_offset("hello", 2)); + assert_eq!(6, bol_offset("hello\nhello", 2)); + assert_eq!(6, bol_offset("hello\nhello\nhello", 2)); + } + + #[test] + fn bol_offset_last_line() { + assert_eq!(5, bol_offset("hello", 3)); + assert_eq!(11, bol_offset("hello\nhello", 3)); + assert_eq!(12, bol_offset("hello\nhello\nhello", 3)); + } + #[test] + fn test_tstring_basic() { + assert!( + parse_module("t'hello'", None).is_ok(), + "Failed to parse t'hello'" + ); + assert!( + parse_module("t'{hello}'", None).is_ok(), + "Failed to parse t'{{hello}}'" + ); + assert!( + parse_module("t'{hello:r}'", None).is_ok(), + "Failed to parse t'{{hello:r}}'" + ); + assert!( + parse_module("f'line1\\n{hello:r}\\nline2'", None).is_ok(), + "Failed to parse t'line1\\n{{hello:r}}\\nline2'" + ); + } +} diff --git a/native/libcst/src/nodes/codegen.rs b/native/libcst/src/nodes/codegen.rs new file mode 100644 index 00000000..1ebf8d80 --- /dev/null +++ b/native/libcst/src/nodes/codegen.rs @@ -0,0 +1,65 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use std::fmt; +#[derive(Debug)] +pub struct CodegenState<'a> { + pub tokens: String, + pub indent_tokens: Vec<&'a str>, + pub default_newline: &'a str, + pub default_indent: &'a str, +} + +impl<'a> CodegenState<'a> { + pub fn indent(&mut self, v: &'a str) { + self.indent_tokens.push(v); + } + pub fn dedent(&mut self) { + self.indent_tokens.pop(); + } + pub fn add_indent(&mut self) { + self.tokens.extend(self.indent_tokens.iter().cloned()); + } + pub fn add_token(&mut self, tok: &'a str) { + self.tokens.push_str(tok); + } +} + +impl<'a> fmt::Display for CodegenState<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.tokens) + } +} + +pub trait Codegen<'a> { + fn codegen(&self, state: &mut CodegenState<'a>); +} + +impl<'a, T> Codegen<'a> for Option +where + T: Codegen<'a>, +{ + fn codegen(&self, state: &mut CodegenState<'a>) { + if let Some(s) = &self { + s.codegen(state); + } + } +} + +#[cfg(windows)] +const LINE_ENDING: &str = "\r\n"; +#[cfg(not(windows))] +const LINE_ENDING: &str = "\n"; + +impl<'a> Default for CodegenState<'a> { + fn default() -> Self { + Self { + default_newline: LINE_ENDING, + default_indent: " ", + indent_tokens: Default::default(), + tokens: Default::default(), + } + } +} diff --git a/native/libcst/src/nodes/expression.rs b/native/libcst/src/nodes/expression.rs new file mode 100644 index 00000000..c72d301d --- /dev/null +++ b/native/libcst/src/nodes/expression.rs @@ -0,0 +1,2745 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use std::mem::swap; + +use crate::{ + inflate_helpers::adjust_parameters_trailing_whitespace, + nodes::{ + op::*, + statement::*, + traits::{Inflate, ParenthesizedDeflatedNode, ParenthesizedNode, Result, WithComma}, + whitespace::ParenthesizableWhitespace, + Annotation, AssignEqual, AssignTargetExpression, BinaryOp, BooleanOp, Codegen, + CodegenState, Colon, Comma, CompOp, Dot, UnaryOp, + }, + tokenizer::{ + whitespace_parser::{parse_parenthesizable_whitespace, Config}, + Token, + }, +}; +#[cfg(feature = "py")] +use libcst_derive::TryIntoPy; +use libcst_derive::{cst_node, Codegen, Inflate, ParenthesizedDeflatedNode, ParenthesizedNode}; + +type TokenRef<'r, 'a> = &'r Token<'a>; + +#[cst_node(Default)] +pub struct Parameters<'a> { + pub params: Vec>, + pub star_arg: Option>, + pub kwonly_params: Vec>, + pub star_kwarg: Option>, + pub posonly_params: Vec>, + pub posonly_ind: Option>, +} + +impl<'a> Parameters<'a> { + pub fn is_empty(&self) -> bool { + self.params.is_empty() + && self.star_arg.is_none() + && self.kwonly_params.is_empty() + && self.star_kwarg.is_none() + && self.posonly_params.is_empty() + && self.posonly_ind.is_none() + } +} + +impl<'r, 'a> DeflatedParameters<'r, 'a> { + pub fn is_empty(&self) -> bool { + self.params.is_empty() + && self.star_arg.is_none() + && self.kwonly_params.is_empty() + && self.star_kwarg.is_none() + && self.posonly_params.is_empty() + && self.posonly_ind.is_none() + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedParameters<'r, 'a> { + type Inflated = Parameters<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let posonly_params = self.posonly_params.inflate(config)?; + let posonly_ind = self.posonly_ind.inflate(config)?; + let params = self.params.inflate(config)?; + let star_arg = self.star_arg.inflate(config)?; + let kwonly_params = self.kwonly_params.inflate(config)?; + let star_kwarg = self.star_kwarg.inflate(config)?; + Ok(Self::Inflated { + params, + star_arg, + kwonly_params, + star_kwarg, + posonly_params, + 
posonly_ind, + }) + } +} + +#[cst_node(Inflate)] +pub enum StarArg<'a> { + Star(Box>), + Param(Box>), +} + +impl<'a> Codegen<'a> for Parameters<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + let params_after_kwonly = self.star_kwarg.is_some(); + let params_after_regular = !self.kwonly_params.is_empty() || params_after_kwonly; + let params_after_posonly = !self.params.is_empty() || params_after_regular; + let star_included = self.star_arg.is_some() || !self.kwonly_params.is_empty(); + + for p in &self.posonly_params { + p.codegen(state, None, true); + } + + match &self.posonly_ind { + Some(ind) => ind.codegen(state, params_after_posonly), + _ => { + if !self.posonly_params.is_empty() { + if params_after_posonly { + state.add_token("/, "); + } else { + state.add_token("/"); + } + } + } + } + + let param_size = self.params.len(); + for (i, p) in self.params.iter().enumerate() { + p.codegen(state, None, params_after_regular || i < param_size - 1); + } + + let kwonly_size = self.kwonly_params.len(); + match &self.star_arg { + None => { + if star_included { + state.add_token("*, ") + } + } + Some(StarArg::Param(p)) => p.codegen( + state, + Some("*"), + kwonly_size > 0 || self.star_kwarg.is_some(), + ), + Some(StarArg::Star(s)) => s.codegen(state), + } + + for (i, p) in self.kwonly_params.iter().enumerate() { + p.codegen(state, None, params_after_kwonly || i < kwonly_size - 1); + } + + if let Some(star) = &self.star_kwarg { + star.codegen(state, Some("**"), false) + } + } +} + +#[cst_node] +pub struct ParamSlash<'a> { + pub comma: Option>, + pub whitespace_after: ParenthesizableWhitespace<'a>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> ParamSlash<'a> { + fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { + state.add_token("/"); + self.whitespace_after.codegen(state); + match (&self.comma, default_comma) { + (Some(comma), _) => comma.codegen(state), + (None, true) => state.add_token(", "), + _ => {} + } + } +} + +impl<'r, 'a> 
Inflate<'a> for DeflatedParamSlash<'r, 'a> { + type Inflated = ParamSlash<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after = + parse_parenthesizable_whitespace(config, &mut self.tok.whitespace_after.borrow_mut())?; + let comma = self.comma.inflate(config)?; + Ok(Self::Inflated { + comma, + whitespace_after, + }) + } +} + +#[cst_node] +pub struct ParamStar<'a> { + pub comma: Comma<'a>, +} + +impl<'a> Codegen<'a> for ParamStar<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("*"); + self.comma.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedParamStar<'r, 'a> { + type Inflated = ParamStar<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let comma = self.comma.inflate(config)?; + Ok(Self::Inflated { comma }) + } +} + +#[cst_node(ParenthesizedNode, Default)] +pub struct Name<'a> { + pub value: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedName<'r, 'a> { + type Inflated = Name<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value: self.value, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for Name<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token(self.value); + }); + } +} + +#[cst_node] +pub struct Param<'a> { + pub name: Name<'a>, + pub annotation: Option>, + pub equal: Option>, + pub default: Option>, + + pub comma: Option>, + + pub star: Option<&'a str>, + + pub whitespace_after_star: ParenthesizableWhitespace<'a>, + pub whitespace_after_param: ParenthesizableWhitespace<'a>, + + pub(crate) star_tok: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedParam<'r, 'a> { + type Inflated = Param<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let name = self.name.inflate(config)?; + let annotation = self.annotation.inflate(config)?; + let equal = 
self.equal.inflate(config)?; + let default = self.default.inflate(config)?; + let comma = self.comma.inflate(config)?; + let whitespace_after_star = if let Some(star_tok) = self.star_tok.as_mut() { + parse_parenthesizable_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())? + } else { + Default::default() + }; + let whitespace_after_param = Default::default(); // TODO + Ok(Self::Inflated { + name, + annotation, + equal, + default, + comma, + star: self.star, + whitespace_after_star, + whitespace_after_param, + }) + } +} + +impl<'r, 'a> Default for DeflatedParam<'r, 'a> { + fn default() -> Self { + Self { + name: Default::default(), + annotation: None, + equal: None, + default: None, + comma: None, + star: Some(""), // Note: this preserves a quirk of the pure python parser + star_tok: None, + } + } +} + +impl<'a> Param<'a> { + fn codegen( + &self, + state: &mut CodegenState<'a>, + default_star: Option<&'a str>, + default_comma: bool, + ) { + match (self.star, default_star) { + (Some(star), _) => state.add_token(star), + (None, Some(star)) => state.add_token(star), + _ => {} + } + self.whitespace_after_star.codegen(state); + self.name.codegen(state); + + if let Some(ann) = &self.annotation { + ann.codegen(state, ":"); + } + + match (&self.equal, &self.default) { + (Some(equal), Some(def)) => { + equal.codegen(state); + def.codegen(state); + } + (None, Some(def)) => { + state.add_token(" = "); + def.codegen(state); + } + _ => {} + } + + match &self.comma { + Some(comma) => comma.codegen(state), + None if default_comma => state.add_token(", "), + _ => {} + } + + self.whitespace_after_param.codegen(state); + } +} + +#[cst_node] +pub struct Arg<'a> { + pub value: Expression<'a>, + pub keyword: Option>, + pub equal: Option>, + pub comma: Option>, + pub star: &'a str, + pub whitespace_after_star: ParenthesizableWhitespace<'a>, + pub whitespace_after_arg: ParenthesizableWhitespace<'a>, + + pub(crate) star_tok: Option>, +} + +impl<'r, 'a> Inflate<'a> for 
DeflatedArg<'r, 'a> { + type Inflated = Arg<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let whitespace_after_star = if let Some(star_tok) = self.star_tok.as_mut() { + parse_parenthesizable_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())? + } else { + Default::default() + }; + let keyword = self.keyword.inflate(config)?; + let equal = self.equal.inflate(config)?; + let value = self.value.inflate(config)?; + let comma = self.comma.inflate(config)?; + // whitespace_after_arg is handled in Call + let whitespace_after_arg = Default::default(); + Ok(Self::Inflated { + value, + keyword, + equal, + comma, + star: self.star, + whitespace_after_star, + whitespace_after_arg, + }) + } +} + +impl<'a> Arg<'a> { + pub fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { + state.add_token(self.star); + self.whitespace_after_star.codegen(state); + if let Some(kw) = &self.keyword { + kw.codegen(state); + } + if let Some(eq) = &self.equal { + eq.codegen(state); + } else if self.keyword.is_some() { + state.add_token(" = "); + } + self.value.codegen(state); + + if let Some(comma) = &self.comma { + comma.codegen(state); + } else if default_comma { + state.add_token(", "); + } + + self.whitespace_after_arg.codegen(state); + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedArg<'r, 'a> { + fn with_comma(self, c: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(c), + ..self + } + } +} + +#[cst_node] +#[derive(Default)] +pub struct LeftParen<'a> { + /// Any space that appears directly after this left parenthesis. 
+ pub whitespace_after: ParenthesizableWhitespace<'a>, + + pub(crate) lpar_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for LeftParen<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("("); + self.whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedLeftParen<'r, 'a> { + type Inflated = LeftParen<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.lpar_tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { whitespace_after }) + } +} + +#[cst_node] +#[derive(Default)] +pub struct RightParen<'a> { + /// Any space that appears directly before this right parenthesis. + pub whitespace_before: ParenthesizableWhitespace<'a>, + + pub(crate) rpar_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for RightParen<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token(")"); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedRightParen<'r, 'a> { + type Inflated = RightParen<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.rpar_tok).whitespace_before.borrow_mut(), + )?; + Ok(Self::Inflated { whitespace_before }) + } +} + +#[cst_node(ParenthesizedNode, Codegen, Inflate)] +pub enum Expression<'a> { + Name(Box>), + Ellipsis(Box>), + Integer(Box>), + Float(Box>), + Imaginary(Box>), + Comparison(Box>), + UnaryOperation(Box>), + BinaryOperation(Box>), + BooleanOperation(Box>), + Attribute(Box>), + Tuple(Box>), + Call(Box>), + GeneratorExp(Box>), + ListComp(Box>), + SetComp(Box>), + DictComp(Box>), + List(Box>), + Set(Box>), + Dict(Box>), + Subscript(Box>), + StarredElement(Box>), + IfExp(Box>), + Lambda(Box>), + Yield(Box>), + Await(Box>), + SimpleString(Box>), + ConcatenatedString(Box>), + FormattedString(Box>), + TemplatedString(Box>), + NamedExpr(Box>), +} + +#[cst_node(ParenthesizedNode)] 
+pub struct Ellipsis<'a> { + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for Ellipsis<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token("..."); + }) + } +} +impl<'r, 'a> Inflate<'a> for DeflatedEllipsis<'r, 'a> { + type Inflated = Ellipsis<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { lpar, rpar }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Integer<'a> { + /// A string representation of the integer, such as ``"100000"`` or + /// ``"100_000"``. + pub value: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for Integer<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token(self.value); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedInteger<'r, 'a> { + type Inflated = Integer<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value: self.value, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Float<'a> { + /// A string representation of the floating point number, such as ```"0.05"``, + /// ``".050"``, or ``"5e-2"``. 
+ pub value: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for Float<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token(self.value); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedFloat<'r, 'a> { + type Inflated = Float<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value: self.value, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Imaginary<'a> { + /// A string representation of the complex number, such as ``"2j"`` + pub value: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for Imaginary<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token(self.value); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedImaginary<'r, 'a> { + type Inflated = Imaginary<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value: self.value, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Comparison<'a> { + pub left: Box>, + pub comparisons: Vec>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for Comparison<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.left.codegen(state); + for comp in &self.comparisons { + comp.codegen(state); + } + }) + } +} +impl<'r, 'a> Inflate<'a> for DeflatedComparison<'r, 'a> { + type Inflated = Comparison<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let left = self.left.inflate(config)?; + let comparisons = self.comparisons.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + left, + comparisons, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] 
+pub struct UnaryOperation<'a> { + pub operator: UnaryOp<'a>, + pub expression: Box>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for UnaryOperation<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.operator.codegen(state); + self.expression.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedUnaryOperation<'r, 'a> { + type Inflated = UnaryOperation<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let operator = self.operator.inflate(config)?; + let expression = self.expression.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + operator, + expression, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct BinaryOperation<'a> { + pub left: Box>, + pub operator: BinaryOp<'a>, + pub right: Box>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for BinaryOperation<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.left.codegen(state); + self.operator.codegen(state); + self.right.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedBinaryOperation<'r, 'a> { + type Inflated = BinaryOperation<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let left = self.left.inflate(config)?; + let operator = self.operator.inflate(config)?; + let right = self.right.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + left, + operator, + right, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct BooleanOperation<'a> { + pub left: Box>, + pub operator: BooleanOp<'a>, + pub right: Box>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for BooleanOperation<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.left.codegen(state); + self.operator.codegen(state); + 
self.right.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedBooleanOperation<'r, 'a> { + type Inflated = BooleanOperation<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let left = self.left.inflate(config)?; + let operator = self.operator.inflate(config)?; + let right = self.right.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + left, + operator, + right, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Call<'a> { + pub func: Box>, + pub args: Vec>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_after_func: ParenthesizableWhitespace<'a>, + pub whitespace_before_args: ParenthesizableWhitespace<'a>, + + pub(crate) lpar_tok: TokenRef<'a>, + pub(crate) rpar_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedCall<'r, 'a> { + type Inflated = Call<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let func = self.func.inflate(config)?; + let whitespace_after_func = parse_parenthesizable_whitespace( + config, + &mut (*self.lpar_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_before_args = parse_parenthesizable_whitespace( + config, + &mut (*self.lpar_tok).whitespace_after.borrow_mut(), + )?; + let mut args = self.args.inflate(config)?; + + if let Some(arg) = args.last_mut() { + if arg.comma.is_none() { + arg.whitespace_after_arg = parse_parenthesizable_whitespace( + config, + &mut (*self.rpar_tok).whitespace_before.borrow_mut(), + )?; + } + } + let rpar = self.rpar.inflate(config)?; + + Ok(Self::Inflated { + func, + args, + lpar, + rpar, + whitespace_after_func, + whitespace_before_args, + }) + } +} + +impl<'a> Codegen<'a> for Call<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.func.codegen(state); + self.whitespace_after_func.codegen(state); + state.add_token("("); + self.whitespace_before_args.codegen(state); + 
let arg_len = self.args.len(); + for (i, arg) in self.args.iter().enumerate() { + arg.codegen(state, i + 1 < arg_len); + } + state.add_token(")"); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Attribute<'a> { + pub value: Box>, + pub attr: Name<'a>, + pub dot: Dot<'a>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedAttribute<'r, 'a> { + type Inflated = Attribute<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let value = self.value.inflate(config)?; + let dot = self.dot.inflate(config)?; + let attr = self.attr.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value, + attr, + dot, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for Attribute<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.value.codegen(state); + self.dot.codegen(state); + self.attr.codegen(state); + }) + } +} + +#[cst_node(Codegen, Inflate)] +pub enum NameOrAttribute<'a> { + N(Box>), + A(Box>), +} + +impl<'r, 'a> std::convert::From> for DeflatedExpression<'r, 'a> { + fn from(x: DeflatedNameOrAttribute<'r, 'a>) -> Self { + match x { + DeflatedNameOrAttribute::N(n) => Self::Name(n), + DeflatedNameOrAttribute::A(a) => Self::Attribute(a), + } + } +} + +#[cst_node] +pub struct ComparisonTarget<'a> { + pub operator: CompOp<'a>, + pub comparator: Expression<'a>, +} + +impl<'a> Codegen<'a> for ComparisonTarget<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.operator.codegen(state); + self.comparator.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedComparisonTarget<'r, 'a> { + type Inflated = ComparisonTarget<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let operator = self.operator.inflate(config)?; + let comparator = self.comparator.inflate(config)?; + Ok(Self::Inflated { + operator, + comparator, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct StarredElement<'a> { + 
pub value: Box>, + pub comma: Option>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_before_value: ParenthesizableWhitespace<'a>, + + pub(crate) star_tok: TokenRef<'a>, +} + +impl<'r, 'a> DeflatedStarredElement<'r, 'a> { + pub fn inflate_element(self, config: &Config<'a>, is_last: bool) -> Result> { + let lpar = self.lpar.inflate(config)?; + let whitespace_before_value = parse_parenthesizable_whitespace( + config, + &mut (*self.star_tok).whitespace_after.borrow_mut(), + )?; + let value = self.value.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + let comma = if is_last { + self.comma.map(|c| c.inflate_before(config)).transpose() + } else { + self.comma.inflate(config) + }?; + Ok(StarredElement { + value, + comma, + lpar, + rpar, + whitespace_before_value, + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedStarredElement<'r, 'a> { + type Inflated = StarredElement<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + self.inflate_element(config, false) + } +} + +impl<'a> Codegen<'a> for StarredElement<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token("*"); + self.whitespace_before_value.codegen(state); + self.value.codegen(state); + }); + if let Some(comma) = &self.comma { + comma.codegen(state); + } + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node(NoIntoPy)] +pub enum Element<'a> { + Simple { + value: Expression<'a>, + comma: Option>, + }, + Starred(Box>), +} + +impl<'a> Element<'a> { + pub fn codegen( + &self, + state: &mut CodegenState<'a>, + default_comma: bool, + default_comma_whitespace: bool, + ) { + match self { + Self::Simple { value, comma } => { + value.codegen(state); + if let Some(comma) = comma { + comma.codegen(state) + } + } + Self::Starred(s) => s.codegen(state), + } + let maybe_comma = match self { + Self::Simple { comma, .. 
} => comma, + Self::Starred(s) => &s.comma, + }; + if maybe_comma.is_none() && default_comma { + state.add_token(if default_comma_whitespace { ", " } else { "," }); + } + } +} +impl<'r, 'a> DeflatedElement<'r, 'a> { + pub fn inflate_element(self, config: &Config<'a>, is_last: bool) -> Result> { + Ok(match self { + Self::Starred(s) => Element::Starred(Box::new(s.inflate_element(config, is_last)?)), + Self::Simple { value, comma } => Element::Simple { + value: value.inflate(config)?, + comma: if is_last { + comma.map(|c| c.inflate_before(config)).transpose()? + } else { + comma.inflate(config)? + }, + }, + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedElement<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + let comma = Some(comma); + match self { + Self::Simple { value, .. } => Self::Simple { comma, value }, + Self::Starred(mut s) => { + s.comma = comma; + Self::Starred(s) + } + } + } +} +impl<'r, 'a> std::convert::From> for DeflatedElement<'r, 'a> { + fn from(e: DeflatedExpression<'r, 'a>) -> Self { + match e { + DeflatedExpression::StarredElement(e) => Self::Starred(e), + value => Self::Simple { value, comma: None }, + } + } +} + +#[cst_node(ParenthesizedNode, Default)] +pub struct Tuple<'a> { + pub elements: Vec>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedTuple<'r, 'a> { + type Inflated = Tuple<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let len = self.elements.len(); + let elements = self + .elements + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) + .collect::>>()?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elements, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for Tuple<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + let len = self.elements.len(); + if len == 1 { + self.elements.first().unwrap().codegen(state, 
true, false); + } else { + for (idx, el) in self.elements.iter().enumerate() { + el.codegen(state, idx < len - 1, true); + } + } + }); + } +} + +#[cst_node(ParenthesizedNode)] +pub struct GeneratorExp<'a> { + pub elt: Box>, + pub for_in: Box>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for GeneratorExp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.elt.codegen(state); + self.for_in.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedGeneratorExp<'r, 'a> { + type Inflated = GeneratorExp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let elt = self.elt.inflate(config)?; + let for_in = self.for_in.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elt, + for_in, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct ListComp<'a> { + pub elt: Box>, + pub for_in: Box>, + pub lbracket: LeftSquareBracket<'a>, + pub rbracket: RightSquareBracket<'a>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for ListComp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.lbracket.codegen(state); + self.elt.codegen(state); + self.for_in.codegen(state); + self.rbracket.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedListComp<'r, 'a> { + type Inflated = ListComp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbracket = self.lbracket.inflate(config)?; + let elt = self.elt.inflate(config)?; + let for_in = self.for_in.inflate(config)?; + let rbracket = self.rbracket.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elt, + for_in, + lbracket, + rbracket, + lpar, + rpar, + }) + } +} + +#[cst_node] +#[derive(Default)] +pub struct LeftSquareBracket<'a> { + pub whitespace_after: ParenthesizableWhitespace<'a>, + pub(crate) tok: 
TokenRef<'a>, +} + +impl<'a> Codegen<'a> for LeftSquareBracket<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("["); + self.whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedLeftSquareBracket<'r, 'a> { + type Inflated = LeftSquareBracket<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { whitespace_after }) + } +} + +#[cst_node] +#[derive(Default)] +pub struct RightSquareBracket<'a> { + pub whitespace_before: ParenthesizableWhitespace<'a>, + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for RightSquareBracket<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token("]"); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedRightSquareBracket<'r, 'a> { + type Inflated = RightSquareBracket<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + Ok(Self::Inflated { whitespace_before }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct SetComp<'a> { + pub elt: Box>, + pub for_in: Box>, + pub lbrace: LeftCurlyBrace<'a>, + pub rbrace: RightCurlyBrace<'a>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSetComp<'r, 'a> { + type Inflated = SetComp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbrace = self.lbrace.inflate(config)?; + let elt = self.elt.inflate(config)?; + let for_in = self.for_in.inflate(config)?; + let rbrace = self.rbrace.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elt, + for_in, + lbrace, + rbrace, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for SetComp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + 
self.parenthesize(state, |state| { + self.lbrace.codegen(state); + self.elt.codegen(state); + self.for_in.codegen(state); + self.rbrace.codegen(state); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct DictComp<'a> { + pub key: Box>, + pub value: Box>, + pub for_in: Box>, + pub lbrace: LeftCurlyBrace<'a>, + pub rbrace: RightCurlyBrace<'a>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_before_colon: ParenthesizableWhitespace<'a>, + pub whitespace_after_colon: ParenthesizableWhitespace<'a>, + + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedDictComp<'r, 'a> { + type Inflated = DictComp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbrace = self.lbrace.inflate(config)?; + let key = self.key.inflate(config)?; + let whitespace_before_colon = parse_parenthesizable_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after_colon = parse_parenthesizable_whitespace( + config, + &mut (*self.colon_tok).whitespace_after.borrow_mut(), + )?; + let value = self.value.inflate(config)?; + let for_in = self.for_in.inflate(config)?; + let rbrace = self.rbrace.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + key, + value, + for_in, + lbrace, + rbrace, + lpar, + rpar, + whitespace_before_colon, + whitespace_after_colon, + }) + } +} + +impl<'a> Codegen<'a> for DictComp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.lbrace.codegen(state); + self.key.codegen(state); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.whitespace_after_colon.codegen(state); + self.value.codegen(state); + self.for_in.codegen(state); + self.rbrace.codegen(state); + }) + } +} + +#[cst_node] +pub struct LeftCurlyBrace<'a> { + pub whitespace_after: ParenthesizableWhitespace<'a>, + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Default for 
LeftCurlyBrace<'a> { + fn default() -> Self { + Self { + whitespace_after: Default::default(), + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedLeftCurlyBrace<'r, 'a> { + type Inflated = LeftCurlyBrace<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { whitespace_after }) + } +} + +impl<'a> Codegen<'a> for LeftCurlyBrace<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("{"); + self.whitespace_after.codegen(state); + } +} + +#[cst_node] +pub struct RightCurlyBrace<'a> { + pub whitespace_before: ParenthesizableWhitespace<'a>, + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Default for RightCurlyBrace<'a> { + fn default() -> Self { + Self { + whitespace_before: Default::default(), + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedRightCurlyBrace<'r, 'a> { + type Inflated = RightCurlyBrace<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + Ok(Self::Inflated { whitespace_before }) + } +} + +impl<'a> Codegen<'a> for RightCurlyBrace<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token("}"); + } +} + +#[cst_node] +pub struct CompFor<'a> { + pub target: AssignTargetExpression<'a>, + pub iter: Expression<'a>, + pub ifs: Vec>, + pub inner_for_in: Option>>, + pub asynchronous: Option>, + pub whitespace_before: ParenthesizableWhitespace<'a>, + pub whitespace_after_for: ParenthesizableWhitespace<'a>, + pub whitespace_before_in: ParenthesizableWhitespace<'a>, + pub whitespace_after_in: ParenthesizableWhitespace<'a>, + + pub(crate) async_tok: Option>, + pub(crate) for_tok: TokenRef<'a>, + pub(crate) in_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for CompFor<'a> { + fn codegen(&self, state: &mut 
CodegenState<'a>) { + self.whitespace_before.codegen(state); + if let Some(asynchronous) = &self.asynchronous { + asynchronous.codegen(state); + } + state.add_token("for"); + self.whitespace_after_for.codegen(state); + self.target.codegen(state); + self.whitespace_before_in.codegen(state); + state.add_token("in"); + self.whitespace_after_in.codegen(state); + self.iter.codegen(state); + for if_ in &self.ifs { + if_.codegen(state); + } + if let Some(inner) = &self.inner_for_in { + inner.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedCompFor<'r, 'a> { + type Inflated = CompFor<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let mut whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.for_tok).whitespace_before.borrow_mut(), + )?; + let asynchronous = if let Some(asy_tok) = self.async_tok.as_mut() { + // If there is an async keyword, the start of the CompFor expression is + // considered to be this keyword, so whitespace_before needs to adjust but + // Asynchronous will own the whitespace before the for token. 
+ let mut asy_whitespace_after = parse_parenthesizable_whitespace( + config, + &mut asy_tok.whitespace_before.borrow_mut(), + )?; + swap(&mut asy_whitespace_after, &mut whitespace_before); + Some(Asynchronous { + whitespace_after: asy_whitespace_after, + }) + } else { + None + }; + let whitespace_after_for = parse_parenthesizable_whitespace( + config, + &mut (*self.for_tok).whitespace_after.borrow_mut(), + )?; + let target = self.target.inflate(config)?; + let whitespace_before_in = parse_parenthesizable_whitespace( + config, + &mut (*self.in_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after_in = parse_parenthesizable_whitespace( + config, + &mut (*self.in_tok).whitespace_after.borrow_mut(), + )?; + let iter = self.iter.inflate(config)?; + let ifs = self.ifs.inflate(config)?; + let inner_for_in = self.inner_for_in.inflate(config)?; + Ok(Self::Inflated { + target, + iter, + ifs, + inner_for_in, + asynchronous, + whitespace_before, + whitespace_after_for, + whitespace_before_in, + whitespace_after_in, + }) + } +} + +#[cst_node] +pub struct Asynchronous<'a> { + pub whitespace_after: ParenthesizableWhitespace<'a>, +} + +impl<'a> Codegen<'a> for Asynchronous<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("async"); + self.whitespace_after.codegen(state); + } +} + +pub(crate) fn make_async<'r, 'a>() -> DeflatedAsynchronous<'r, 'a> { + DeflatedAsynchronous { + _phantom: Default::default(), + } +} + +#[cst_node] +pub struct CompIf<'a> { + pub test: Expression<'a>, + pub whitespace_before: ParenthesizableWhitespace<'a>, + pub whitespace_before_test: ParenthesizableWhitespace<'a>, + + pub(crate) if_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for CompIf<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token("if"); + self.whitespace_before_test.codegen(state); + self.test.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedCompIf<'r, 'a> { + type Inflated = 
CompIf<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.if_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_before_test = parse_parenthesizable_whitespace( + config, + &mut (*self.if_tok).whitespace_after.borrow_mut(), + )?; + let test = self.test.inflate(config)?; + Ok(Self::Inflated { + test, + whitespace_before, + whitespace_before_test, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct List<'a> { + pub elements: Vec>, + pub lbracket: LeftSquareBracket<'a>, + pub rbracket: RightSquareBracket<'a>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedList<'r, 'a> { + type Inflated = List<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbracket = self.lbracket.inflate(config)?; + let len = self.elements.len(); + let elements = self + .elements + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) + .collect::>>()?; + let rbracket = if !elements.is_empty() { + // lbracket owns all the whitespace if there are no elements + self.rbracket.inflate(config)? 
+ } else { + Default::default() + }; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elements, + lbracket, + rbracket, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for List<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.lbracket.codegen(state); + let len = self.elements.len(); + for (idx, el) in self.elements.iter().enumerate() { + el.codegen(state, idx < len - 1, true); + } + self.rbracket.codegen(state); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Set<'a> { + pub elements: Vec>, + pub lbrace: LeftCurlyBrace<'a>, + pub rbrace: RightCurlyBrace<'a>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSet<'r, 'a> { + type Inflated = Set<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbrace = self.lbrace.inflate(config)?; + let len = self.elements.len(); + let elements = self + .elements + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) + .collect::>>()?; + let rbrace = if !elements.is_empty() { + self.rbrace.inflate(config)? 
+ } else { + Default::default() + }; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elements, + lbrace, + rbrace, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for Set<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.lbrace.codegen(state); + let len = self.elements.len(); + for (idx, el) in self.elements.iter().enumerate() { + el.codegen(state, idx < len - 1, true); + } + self.rbrace.codegen(state); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Dict<'a> { + pub elements: Vec>, + pub lbrace: LeftCurlyBrace<'a>, + pub rbrace: RightCurlyBrace<'a>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedDict<'r, 'a> { + type Inflated = Dict<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbrace = self.lbrace.inflate(config)?; + let len = self.elements.len(); + let elements = self + .elements + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) + .collect::>>()?; + let rbrace = if !elements.is_empty() { + self.rbrace.inflate(config)? 
+ } else { + Default::default() + }; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elements, + lbrace, + rbrace, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for Dict<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.lbrace.codegen(state); + let len = self.elements.len(); + for (idx, el) in self.elements.iter().enumerate() { + el.codegen(state, idx < len - 1, true); + } + self.rbrace.codegen(state); + }) + } +} + +#[cst_node(NoIntoPy)] +pub enum DictElement<'a> { + Simple { + key: Expression<'a>, + value: Expression<'a>, + comma: Option>, + whitespace_before_colon: ParenthesizableWhitespace<'a>, + whitespace_after_colon: ParenthesizableWhitespace<'a>, + colon_tok: TokenRef<'a>, + }, + Starred(StarredDictElement<'a>), +} + +impl<'r, 'a> DeflatedDictElement<'r, 'a> { + pub fn inflate_element( + self, + config: &Config<'a>, + last_element: bool, + ) -> Result> { + Ok(match self { + Self::Starred(s) => DictElement::Starred(s.inflate_element(config, last_element)?), + Self::Simple { + key, + value, + comma, + colon_tok, + .. + } => { + let whitespace_before_colon = parse_parenthesizable_whitespace( + config, + &mut colon_tok.whitespace_before.borrow_mut(), + )?; + let whitespace_after_colon = parse_parenthesizable_whitespace( + config, + &mut colon_tok.whitespace_after.borrow_mut(), + )?; + DictElement::Simple { + key: key.inflate(config)?, + whitespace_before_colon, + whitespace_after_colon, + value: value.inflate(config)?, + comma: if last_element { + comma.map(|c| c.inflate_before(config)).transpose() + } else { + comma.inflate(config) + }?, + } + } + }) + } +} + +impl<'a> DictElement<'a> { + fn codegen( + &self, + state: &mut CodegenState<'a>, + default_comma: bool, + default_comma_whitespace: bool, + ) { + match self { + Self::Simple { + key, + value, + comma, + whitespace_before_colon, + whitespace_after_colon, + .. 
+ } => { + key.codegen(state); + whitespace_before_colon.codegen(state); + state.add_token(":"); + whitespace_after_colon.codegen(state); + value.codegen(state); + if let Some(comma) = comma { + comma.codegen(state) + } + } + Self::Starred(s) => s.codegen(state), + } + let maybe_comma = match self { + Self::Simple { comma, .. } => comma, + Self::Starred(s) => &s.comma, + }; + if maybe_comma.is_none() && default_comma { + state.add_token(if default_comma_whitespace { ", " } else { "," }); + } + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedDictElement<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + let comma = Some(comma); + match self { + Self::Starred(s) => Self::Starred(DeflatedStarredDictElement { comma, ..s }), + Self::Simple { + key, + value, + colon_tok, + .. + } => Self::Simple { + comma, + key, + value, + colon_tok, + }, + } + } +} + +#[cst_node] +pub struct StarredDictElement<'a> { + pub value: Expression<'a>, + pub comma: Option>, + pub whitespace_before_value: ParenthesizableWhitespace<'a>, + + pub(crate) star_tok: TokenRef<'a>, +} + +impl<'r, 'a> DeflatedStarredDictElement<'r, 'a> { + fn inflate_element( + self, + config: &Config<'a>, + last_element: bool, + ) -> Result> { + let whitespace_before_value = parse_parenthesizable_whitespace( + config, + &mut (*self.star_tok).whitespace_after.borrow_mut(), + )?; + let value = self.value.inflate(config)?; + let comma = if last_element { + self.comma.map(|c| c.inflate_before(config)).transpose() + } else { + self.comma.inflate(config) + }?; + Ok(StarredDictElement { + value, + comma, + whitespace_before_value, + }) + } +} + +impl<'a> Codegen<'a> for StarredDictElement<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("**"); + self.whitespace_before_value.codegen(state); + self.value.codegen(state); + if let Some(comma) = &self.comma { + comma.codegen(state); + } + } +} + +#[cst_node(Codegen, Inflate)] +pub enum BaseSlice<'a> { + Index(Box>), + 
Slice(Box>), +} + +#[cst_node] +pub struct Index<'a> { + pub value: Expression<'a>, + pub star: Option<&'a str>, + pub whitespace_after_star: Option>, + + pub(crate) star_tok: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedIndex<'r, 'a> { + type Inflated = Index<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let (star, whitespace_after_star) = if let Some(star_tok) = self.star_tok.as_mut() { + ( + Some(star_tok.string), + Some(parse_parenthesizable_whitespace( + config, + &mut star_tok.whitespace_after.borrow_mut(), + )?), + ) + } else { + (None, None) + }; + let value = self.value.inflate(config)?; + Ok(Self::Inflated { + value, + star, + whitespace_after_star, + }) + } +} + +impl<'a> Codegen<'a> for Index<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + if let Some(star) = self.star { + state.add_token(star); + } + self.whitespace_after_star.codegen(state); + self.value.codegen(state); + } +} + +#[cst_node] +pub struct Slice<'a> { + #[cfg_attr(feature = "py", no_py_default)] + pub lower: Option>, + #[cfg_attr(feature = "py", no_py_default)] + pub upper: Option>, + pub step: Option>, + pub first_colon: Colon<'a>, + pub second_colon: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSlice<'r, 'a> { + type Inflated = Slice<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lower = self.lower.inflate(config)?; + let first_colon = self.first_colon.inflate(config)?; + let upper = self.upper.inflate(config)?; + let second_colon = self.second_colon.inflate(config)?; + let step = self.step.inflate(config)?; + Ok(Self::Inflated { + lower, + upper, + step, + first_colon, + second_colon, + }) + } +} + +impl<'a> Codegen<'a> for Slice<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + if let Some(lower) = &self.lower { + lower.codegen(state); + } + self.first_colon.codegen(state); + if let Some(upper) = &self.upper { + upper.codegen(state); + } + if let Some(second_colon) = &self.second_colon { + 
second_colon.codegen(state); + } else if self.step.is_some() { + state.add_token(";"); + } + if let Some(step) = &self.step { + step.codegen(state); + } + } +} + +#[cst_node] +pub struct SubscriptElement<'a> { + pub slice: BaseSlice<'a>, + pub comma: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSubscriptElement<'r, 'a> { + type Inflated = SubscriptElement<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let slice = self.slice.inflate(config)?; + let comma = self.comma.inflate(config)?; + Ok(Self::Inflated { slice, comma }) + } +} + +impl<'a> Codegen<'a> for SubscriptElement<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.slice.codegen(state); + if let Some(comma) = &self.comma { + comma.codegen(state); + } + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Subscript<'a> { + pub value: Box>, + pub slice: Vec>, + pub lbracket: LeftSquareBracket<'a>, + pub rbracket: RightSquareBracket<'a>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_after_value: ParenthesizableWhitespace<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSubscript<'r, 'a> { + type Inflated = Subscript<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let value = self.value.inflate(config)?; + let whitespace_after_value = parse_parenthesizable_whitespace( + config, + &mut self.lbracket.tok.whitespace_before.borrow_mut(), + )?; + let lbracket = self.lbracket.inflate(config)?; + let slice = self.slice.inflate(config)?; + let rbracket = self.rbracket.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value, + slice, + lbracket, + rbracket, + lpar, + rpar, + whitespace_after_value, + }) + } +} + +impl<'a> Codegen<'a> for Subscript<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.value.codegen(state); + self.whitespace_after_value.codegen(state); + self.lbracket.codegen(state); + let len = self.slice.len(); + for (i, slice) in 
self.slice.iter().enumerate() { + slice.codegen(state); + if slice.comma.is_none() && i + 1 < len { + state.add_token(", ") + } + } + self.rbracket.codegen(state); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct IfExp<'a> { + pub test: Box>, + pub body: Box>, + pub orelse: Box>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_before_if: ParenthesizableWhitespace<'a>, + pub whitespace_after_if: ParenthesizableWhitespace<'a>, + pub whitespace_before_else: ParenthesizableWhitespace<'a>, + pub whitespace_after_else: ParenthesizableWhitespace<'a>, + + pub(crate) if_tok: TokenRef<'a>, + pub(crate) else_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedIfExp<'r, 'a> { + type Inflated = IfExp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let body = self.body.inflate(config)?; + let whitespace_before_if = parse_parenthesizable_whitespace( + config, + &mut (*self.if_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after_if = parse_parenthesizable_whitespace( + config, + &mut (*self.if_tok).whitespace_after.borrow_mut(), + )?; + let test = self.test.inflate(config)?; + let whitespace_before_else = parse_parenthesizable_whitespace( + config, + &mut (*self.else_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after_else = parse_parenthesizable_whitespace( + config, + &mut (*self.else_tok).whitespace_after.borrow_mut(), + )?; + let orelse = self.orelse.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + test, + body, + orelse, + lpar, + rpar, + whitespace_before_if, + whitespace_after_if, + whitespace_before_else, + whitespace_after_else, + }) + } +} + +impl<'a> Codegen<'a> for IfExp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.body.codegen(state); + self.whitespace_before_if.codegen(state); + state.add_token("if"); + self.whitespace_after_if.codegen(state); + self.test.codegen(state); + 
self.whitespace_before_else.codegen(state); + state.add_token("else"); + self.whitespace_after_else.codegen(state); + self.orelse.codegen(state); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Lambda<'a> { + pub params: Box>, + pub body: Box>, + pub colon: Colon<'a>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_after_lambda: Option>, + + pub(crate) lambda_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedLambda<'r, 'a> { + type Inflated = Lambda<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let whitespace_after_lambda = if !self.params.is_empty() { + Some(parse_parenthesizable_whitespace( + config, + &mut (*self.lambda_tok).whitespace_after.borrow_mut(), + )?) + } else { + Default::default() + }; + let mut params = self.params.inflate(config)?; + adjust_parameters_trailing_whitespace(config, &mut params, &self.colon.tok)?; + let colon = self.colon.inflate(config)?; + let body = self.body.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + params, + body, + colon, + lpar, + rpar, + whitespace_after_lambda, + }) + } +} + +impl<'a> Codegen<'a> for Lambda<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token("lambda"); + if let Some(ws) = &self.whitespace_after_lambda { + ws.codegen(state); + } else if !self.params.is_empty() { + // there's one or more params, add a space + state.add_token(" ") + } + self.params.codegen(state); + self.colon.codegen(state); + self.body.codegen(state); + }) + } +} + +#[cst_node] +pub struct From<'a> { + pub item: Expression<'a>, + pub whitespace_before_from: Option>, + pub whitespace_after_from: ParenthesizableWhitespace<'a>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> From<'a> { + pub fn codegen(&self, state: &mut CodegenState<'a>, default_space: &'a str) { + if let Some(ws) = &self.whitespace_before_from { + ws.codegen(state); + } else { + 
state.add_token(default_space); + } + state.add_token("from"); + self.whitespace_after_from.codegen(state); + self.item.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedFrom<'r, 'a> { + type Inflated = From<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before_from = Some(parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?); + let whitespace_after_from = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + let item = self.item.inflate(config)?; + Ok(Self::Inflated { + item, + whitespace_before_from, + whitespace_after_from, + }) + } +} + +#[cst_node] +pub enum YieldValue<'a> { + Expression(Box>), + From(Box>), +} + +impl<'r, 'a> Inflate<'a> for DeflatedYieldValue<'r, 'a> { + type Inflated = YieldValue<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + Ok(match self { + Self::Expression(e) => Self::Inflated::Expression(e.inflate(config)?), + Self::From(e) => { + let mut e = e.inflate(config)?; + e.whitespace_before_from = None; + Self::Inflated::From(e) + } + }) + } +} + +impl<'a> YieldValue<'a> { + fn codegen(&self, state: &mut CodegenState<'a>, default_space: &'a str) { + match self { + Self::Expression(e) => e.codegen(state), + Self::From(f) => f.codegen(state, default_space), + } + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Yield<'a> { + pub value: Option>>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_after_yield: Option>, + + pub(crate) yield_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedYield<'r, 'a> { + type Inflated = Yield<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let whitespace_after_yield = if self.value.is_some() { + Some(parse_parenthesizable_whitespace( + config, + &mut (*self.yield_tok).whitespace_after.borrow_mut(), + )?) 
+ } else { + Default::default() + }; + let value = self.value.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value, + lpar, + rpar, + whitespace_after_yield, + }) + } +} + +impl<'a> Codegen<'a> for Yield<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token("yield"); + if let Some(ws) = &self.whitespace_after_yield { + ws.codegen(state); + } else if self.value.is_some() { + state.add_token(" "); + } + + if let Some(val) = &self.value { + val.codegen(state, "") + } + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct Await<'a> { + pub expression: Box>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_after_await: ParenthesizableWhitespace<'a>, + + pub(crate) await_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedAwait<'r, 'a> { + type Inflated = Await<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let whitespace_after_await = parse_parenthesizable_whitespace( + config, + &mut (*self.await_tok).whitespace_after.borrow_mut(), + )?; + let expression = self.expression.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + expression, + lpar, + rpar, + whitespace_after_await, + }) + } +} + +impl<'a> Codegen<'a> for Await<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token("await"); + self.whitespace_after_await.codegen(state); + self.expression.codegen(state); + }) + } +} + +#[cst_node(Codegen, Inflate)] +pub enum String<'a> { + Simple(SimpleString<'a>), + Concatenated(ConcatenatedString<'a>), + Formatted(FormattedString<'a>), + Templated(TemplatedString<'a>), +} + +impl<'r, 'a> std::convert::From> for DeflatedExpression<'r, 'a> { + fn from(s: DeflatedString<'r, 'a>) -> Self { + match s { + DeflatedString::Simple(s) => Self::SimpleString(Box::new(s)), + DeflatedString::Concatenated(s) => 
Self::ConcatenatedString(Box::new(s)), + DeflatedString::Formatted(s) => Self::FormattedString(Box::new(s)), + DeflatedString::Templated(s) => Self::TemplatedString(Box::new(s)), + } + } +} + +#[cst_node(ParenthesizedNode)] +pub struct ConcatenatedString<'a> { + pub left: Box>, + pub right: Box>, + pub lpar: Vec>, + pub rpar: Vec>, + pub whitespace_between: ParenthesizableWhitespace<'a>, + + // we capture the next token after each string piece so Inflate can extract the + // whitespace between individual pieces + pub(crate) right_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedConcatenatedString<'r, 'a> { + type Inflated = ConcatenatedString<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let left = self.left.inflate(config)?; + let whitespace_between = parse_parenthesizable_whitespace( + config, + &mut (*self.right_tok).whitespace_before.borrow_mut(), + )?; + let right = self.right.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + left, + right, + lpar, + rpar, + whitespace_between, + }) + } +} + +impl<'a> Codegen<'a> for ConcatenatedString<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.left.codegen(state); + self.whitespace_between.codegen(state); + self.right.codegen(state); + }) + } +} + +#[cst_node(ParenthesizedNode, Default)] +pub struct SimpleString<'a> { + /// The texual representation of the string, including quotes, prefix + /// characters, and any escape characters present in the original source code, + /// such as ``r"my string\n"``. 
+ pub value: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSimpleString<'r, 'a> { + type Inflated = SimpleString<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + value: self.value, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for SimpleString<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| state.add_token(self.value)) + } +} + +#[cst_node] +pub struct TemplatedStringText<'a> { + pub value: &'a str, +} + +impl<'r, 'a> Inflate<'a> for DeflatedTemplatedStringText<'r, 'a> { + type Inflated = TemplatedStringText<'a>; + fn inflate(self, _config: &Config<'a>) -> Result { + Ok(Self::Inflated { value: self.value }) + } +} + +impl<'a> Codegen<'a> for TemplatedStringText<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token(self.value); + } +} + +pub(crate) fn make_tstringtext<'r, 'a>(value: &'a str) -> DeflatedTemplatedStringText<'r, 'a> { + DeflatedTemplatedStringText { + value, + _phantom: Default::default(), + } +} + +#[cst_node] +pub struct TemplatedStringExpression<'a> { + // This represents the part of a t-string that is insde the brackets '{' and '}'. 
+ pub expression: Expression<'a>, + pub conversion: Option<&'a str>, + pub format_spec: Option>>, + pub whitespace_before_expression: ParenthesizableWhitespace<'a>, + pub whitespace_after_expression: ParenthesizableWhitespace<'a>, + pub equal: Option>, + + pub(crate) lbrace_tok: TokenRef<'a>, + // This is None if there's an equal sign, otherwise it's the first token of + // (conversion, format spec, right brace) in that order + pub(crate) after_expr_tok: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedTemplatedStringExpression<'r, 'a> { + type Inflated = TemplatedStringExpression<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let whitespace_before_expression = parse_parenthesizable_whitespace( + config, + &mut (*self.lbrace_tok).whitespace_after.borrow_mut(), + )?; + let expression = self.expression.inflate(config)?; + let equal = self.equal.inflate(config)?; + let whitespace_after_expression = if let Some(after_expr_tok) = self.after_expr_tok.as_mut() + { + parse_parenthesizable_whitespace( + config, + &mut after_expr_tok.whitespace_before.borrow_mut(), + )? 
+ } else { + Default::default() + }; + let format_spec = self.format_spec.inflate(config)?; + Ok(Self::Inflated { + expression, + conversion: self.conversion, + format_spec, + whitespace_before_expression, + whitespace_after_expression, + equal, + }) + } +} + +impl<'a> Codegen<'a> for TemplatedStringExpression<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("{"); + self.whitespace_before_expression.codegen(state); + self.expression.codegen(state); + if let Some(eq) = &self.equal { + eq.codegen(state); + } + self.whitespace_after_expression.codegen(state); + if let Some(conv) = &self.conversion { + state.add_token("!"); + state.add_token(conv); + } + if let Some(specs) = &self.format_spec { + state.add_token(":"); + for spec in specs { + spec.codegen(state); + } + } + state.add_token("}"); + } +} + +#[cst_node(ParenthesizedNode)] +pub struct TemplatedString<'a> { + pub parts: Vec>, + pub start: &'a str, + pub end: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedTemplatedString<'r, 'a> { + type Inflated = TemplatedString<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let parts = self.parts.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + parts, + start: self.start, + end: self.end, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for TemplatedString<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token(self.start); + for part in &self.parts { + part.codegen(state); + } + state.add_token(self.end); + }) + } +} + +#[cst_node(Codegen, Inflate)] +pub enum TemplatedStringContent<'a> { + Text(TemplatedStringText<'a>), + Expression(Box>), +} +#[cst_node] +pub struct FormattedStringText<'a> { + pub value: &'a str, +} + +impl<'r, 'a> Inflate<'a> for DeflatedFormattedStringText<'r, 'a> { + type Inflated = FormattedStringText<'a>; + fn inflate(self, _config: 
&Config<'a>) -> Result { + Ok(Self::Inflated { value: self.value }) + } +} + +impl<'a> Codegen<'a> for FormattedStringText<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token(self.value); + } +} + +pub(crate) fn make_fstringtext<'r, 'a>(value: &'a str) -> DeflatedFormattedStringText<'r, 'a> { + DeflatedFormattedStringText { + value, + _phantom: Default::default(), + } +} + +#[cst_node] +pub struct FormattedStringExpression<'a> { + pub expression: Expression<'a>, + pub conversion: Option<&'a str>, + pub format_spec: Option>>, + pub whitespace_before_expression: ParenthesizableWhitespace<'a>, + pub whitespace_after_expression: ParenthesizableWhitespace<'a>, + pub equal: Option>, + + pub(crate) lbrace_tok: TokenRef<'a>, + // This is None if there's an equal sign, otherwise it's the first token of + // (conversion, format spec, right brace) in that order + pub(crate) after_expr_tok: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedFormattedStringExpression<'r, 'a> { + type Inflated = FormattedStringExpression<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let whitespace_before_expression = parse_parenthesizable_whitespace( + config, + &mut (*self.lbrace_tok).whitespace_after.borrow_mut(), + )?; + let expression = self.expression.inflate(config)?; + let equal = self.equal.inflate(config)?; + let whitespace_after_expression = if let Some(after_expr_tok) = self.after_expr_tok.as_mut() + { + parse_parenthesizable_whitespace( + config, + &mut after_expr_tok.whitespace_before.borrow_mut(), + )? 
+ } else { + Default::default() + }; + let format_spec = self.format_spec.inflate(config)?; + Ok(Self::Inflated { + expression, + conversion: self.conversion, + format_spec, + whitespace_before_expression, + whitespace_after_expression, + equal, + }) + } +} + +impl<'a> Codegen<'a> for FormattedStringExpression<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("{"); + self.whitespace_before_expression.codegen(state); + self.expression.codegen(state); + if let Some(eq) = &self.equal { + eq.codegen(state); + } + self.whitespace_after_expression.codegen(state); + if let Some(conv) = &self.conversion { + state.add_token("!"); + state.add_token(conv); + } + if let Some(specs) = &self.format_spec { + state.add_token(":"); + for spec in specs { + spec.codegen(state); + } + } + state.add_token("}"); + } +} + +#[cst_node(Codegen, Inflate)] +pub enum FormattedStringContent<'a> { + Text(FormattedStringText<'a>), + Expression(Box>), +} + +#[cst_node(ParenthesizedNode)] +pub struct FormattedString<'a> { + pub parts: Vec>, + pub start: &'a str, + pub end: &'a str, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedFormattedString<'r, 'a> { + type Inflated = FormattedString<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let parts = self.parts.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + parts, + start: self.start, + end: self.end, + lpar, + rpar, + }) + } +} + +impl<'a> Codegen<'a> for FormattedString<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + state.add_token(self.start); + for part in &self.parts { + part.codegen(state); + } + state.add_token(self.end); + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct NamedExpr<'a> { + pub target: Box>, + pub value: Box>, + pub lpar: Vec>, + pub rpar: Vec>, + + pub whitespace_before_walrus: ParenthesizableWhitespace<'a>, + pub 
whitespace_after_walrus: ParenthesizableWhitespace<'a>, + + pub(crate) walrus_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for NamedExpr<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.target.codegen(state); + self.whitespace_before_walrus.codegen(state); + state.add_token(":="); + self.whitespace_after_walrus.codegen(state); + self.value.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedNamedExpr<'r, 'a> { + type Inflated = NamedExpr<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let target = self.target.inflate(config)?; + let whitespace_before_walrus = parse_parenthesizable_whitespace( + config, + &mut self.walrus_tok.whitespace_before.borrow_mut(), + )?; + let whitespace_after_walrus = parse_parenthesizable_whitespace( + config, + &mut self.walrus_tok.whitespace_after.borrow_mut(), + )?; + let value = self.value.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + target, + value, + lpar, + rpar, + whitespace_before_walrus, + whitespace_after_walrus, + }) + } +} + +#[cfg(feature = "py")] +mod py { + + use pyo3::types::PyAnyMethods; + use pyo3::types::PyModule; + + use super::*; + use crate::nodes::traits::py::TryIntoPy; + + // TODO: this could be a derive helper attribute to override the python class name + impl<'a> TryIntoPy> for Element<'a> { + fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult> { + match self { + Self::Starred(s) => s.try_into_py(py), + Self::Simple { value, comma } => { + let libcst = PyModule::import(py, "libcst")?; + let kwargs = [ + Some(("value", value.try_into_py(py)?)), + comma + .map(|x| x.try_into_py(py)) + .transpose()? + .map(|x| ("comma", x)), + ] + .iter() + .filter(|x| x.is_some()) + .map(|x| x.as_ref().unwrap()) + .collect::>() + .into_py_dict(py)?; + Ok(libcst + .getattr("Element") + .expect("no Element found in libcst") + .call((), Some(&kwargs))? 
+ .into()) + } + } + } + } + + // TODO: this could be a derive helper attribute to override the python class name + impl<'a> TryIntoPy> for DictElement<'a> { + fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult> { + match self { + Self::Starred(s) => s.try_into_py(py), + Self::Simple { + key, + value, + comma, + whitespace_after_colon, + whitespace_before_colon, + .. + } => { + let libcst = PyModule::import(py, "libcst")?; + let kwargs = [ + Some(("key", key.try_into_py(py)?)), + Some(("value", value.try_into_py(py)?)), + Some(( + "whitespace_before_colon", + whitespace_before_colon.try_into_py(py)?, + )), + Some(( + "whitespace_after_colon", + whitespace_after_colon.try_into_py(py)?, + )), + comma + .map(|x| x.try_into_py(py)) + .transpose()? + .map(|x| ("comma", x)), + ] + .iter() + .filter(|x| x.is_some()) + .map(|x| x.as_ref().unwrap()) + .collect::>() + .into_py_dict(py)?; + Ok(libcst + .getattr("DictElement") + .expect("no Element found in libcst") + .call((), Some(&kwargs))? + .into()) + } + } + } + } +} diff --git a/native/libcst/src/nodes/inflate_helpers.rs b/native/libcst/src/nodes/inflate_helpers.rs new file mode 100644 index 00000000..8bf5c8af --- /dev/null +++ b/native/libcst/src/nodes/inflate_helpers.rs @@ -0,0 +1,39 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use crate::{ + nodes::traits::Result, + tokenizer::{ + whitespace_parser::{parse_parenthesizable_whitespace, Config}, + Token, + }, + Param, Parameters, StarArg, +}; + +pub(crate) fn adjust_parameters_trailing_whitespace<'a>( + config: &Config<'a>, + parameters: &mut Parameters<'a>, + next_tok: &Token<'a>, +) -> Result<()> { + let do_adjust = |param: &mut Param<'a>| -> Result<()> { + let whitespace_after = + parse_parenthesizable_whitespace(config, &mut next_tok.whitespace_before.borrow_mut())?; + if param.comma.is_none() { + param.whitespace_after_param = whitespace_after; + } + Ok(()) + }; + + if let Some(param) = &mut parameters.star_kwarg { + do_adjust(param)?; + } else if let Some(param) = parameters.kwonly_params.last_mut() { + do_adjust(param)?; + } else if let Some(StarArg::Param(param)) = parameters.star_arg.as_mut() { + do_adjust(param)?; + } else if let Some(param) = parameters.params.last_mut() { + do_adjust(param)?; + } + Ok(()) +} diff --git a/native/libcst/src/nodes/macros.rs b/native/libcst/src/nodes/macros.rs new file mode 100644 index 00000000..11028b8c --- /dev/null +++ b/native/libcst/src/nodes/macros.rs @@ -0,0 +1,33 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +/// Generates a function that lazily imports and caches a module's member. This will hold a +/// permanent reference to the imported member. Python's module cache is rarely purged though, so +/// it typically won't matter. +/// +/// This cache is cheaper than looking up the module in python's module cache inspecting the +/// module's `__dict__` each time you want access to the member. +/// +/// If you have multiple imports from the same module, we'll call `py.import` once for each member +/// of the module. 
+#[macro_export] +macro_rules! py_import { + ( $module_name:expr, $member_name:expr, $getter_fn:ident ) => { + paste::paste! { + static [] + : pyo3::once_cell::PyOnceLock>> + = pyo3::once_cell::PyOnceLock::new(); + + fn $getter_fn<'py>(py: pyo3::Python<'py>) -> pyo3::PyResult<&'py pyo3::PyAny> { + Ok([].get_or_init(py, || { + Ok(py.import($module_name)?.get($member_name)?.to_object(py)) + }) + .as_ref() + .map_err(|err| err.clone_ref(py))? + .as_ref(py)) + } + } + }; +} diff --git a/native/libcst/src/nodes/mod.rs b/native/libcst/src/nodes/mod.rs new file mode 100644 index 00000000..e044db94 --- /dev/null +++ b/native/libcst/src/nodes/mod.rs @@ -0,0 +1,131 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +pub(crate) mod whitespace; +pub use whitespace::{ + Comment, EmptyLine, Fakeness, Newline, ParenthesizableWhitespace, ParenthesizedWhitespace, + SimpleWhitespace, TrailingWhitespace, +}; +pub(crate) mod statement; +pub use statement::{ + AnnAssign, Annotation, AsName, Assert, Assign, AssignTarget, AssignTargetExpression, AugAssign, + Break, ClassDef, CompoundStatement, Continue, Decorator, Del, DelTargetExpression, Else, + ExceptHandler, ExceptStarHandler, Expr, Finally, For, FunctionDef, Global, If, Import, + ImportAlias, ImportFrom, ImportNames, IndentedBlock, Match, MatchAs, MatchCase, MatchClass, + MatchKeywordElement, MatchList, MatchMapping, MatchMappingElement, MatchOr, MatchOrElement, + MatchPattern, MatchSequence, MatchSequenceElement, MatchSingleton, MatchStar, MatchTuple, + MatchValue, NameItem, Nonlocal, OrElse, Pass, Raise, Return, SimpleStatementLine, + SimpleStatementSuite, SmallStatement, StarrableMatchSequenceElement, Statement, Suite, Try, + TryStar, TypeAlias, TypeParam, TypeParameters, TypeVar, TypeVarLike, TypeVarTuple, While, With, + WithItem, +}; + +pub(crate) mod expression; +pub use expression::{ + 
Arg, Asynchronous, Attribute, Await, BaseSlice, BinaryOperation, BooleanOperation, Call, + CompFor, CompIf, Comparison, ComparisonTarget, ConcatenatedString, Dict, DictComp, DictElement, + Element, Ellipsis, Expression, Float, FormattedString, FormattedStringContent, + FormattedStringExpression, FormattedStringText, From, GeneratorExp, IfExp, Imaginary, Index, + Integer, Lambda, LeftCurlyBrace, LeftParen, LeftSquareBracket, List, ListComp, Name, + NameOrAttribute, NamedExpr, Param, ParamSlash, ParamStar, Parameters, RightCurlyBrace, + RightParen, RightSquareBracket, Set, SetComp, SimpleString, Slice, StarArg, StarredDictElement, + StarredElement, String, Subscript, SubscriptElement, TemplatedString, TemplatedStringContent, + TemplatedStringExpression, Tuple, UnaryOperation, Yield, YieldValue, +}; + +pub(crate) mod op; +pub use op::{ + AssignEqual, AugOp, BinaryOp, BitOr, BooleanOp, Colon, Comma, CompOp, Dot, ImportStar, + Semicolon, UnaryOp, +}; + +pub(crate) mod module; +pub use module::Module; + +mod codegen; +pub use codegen::{Codegen, CodegenState}; + +pub(crate) mod traits; +pub use traits::{Inflate, ParenthesizedNode, WithComma, WithLeadingLines}; + +pub(crate) mod inflate_helpers; + +pub(crate) mod deflated { + pub use super::expression::{ + DeflatedArg as Arg, DeflatedAsynchronous as Asynchronous, DeflatedAttribute as Attribute, + DeflatedAwait as Await, DeflatedBaseSlice as BaseSlice, + DeflatedBinaryOperation as BinaryOperation, DeflatedBooleanOperation as BooleanOperation, + DeflatedCall as Call, DeflatedCompFor as CompFor, DeflatedCompIf as CompIf, + DeflatedComparison as Comparison, DeflatedComparisonTarget as ComparisonTarget, + DeflatedConcatenatedString as ConcatenatedString, DeflatedDict as Dict, + DeflatedDictComp as DictComp, DeflatedDictElement as DictElement, + DeflatedElement as Element, DeflatedEllipsis as Ellipsis, DeflatedExpression as Expression, + DeflatedFloat as Float, DeflatedFormattedString as FormattedString, + 
DeflatedFormattedStringContent as FormattedStringContent, + DeflatedFormattedStringExpression as FormattedStringExpression, + DeflatedFormattedStringText as FormattedStringText, DeflatedFrom as From, + DeflatedGeneratorExp as GeneratorExp, DeflatedIfExp as IfExp, + DeflatedImaginary as Imaginary, DeflatedIndex as Index, DeflatedInteger as Integer, + DeflatedLambda as Lambda, DeflatedLeftCurlyBrace as LeftCurlyBrace, + DeflatedLeftParen as LeftParen, DeflatedLeftSquareBracket as LeftSquareBracket, + DeflatedList as List, DeflatedListComp as ListComp, DeflatedName as Name, + DeflatedNameOrAttribute as NameOrAttribute, DeflatedNamedExpr as NamedExpr, + DeflatedParam as Param, DeflatedParamSlash as ParamSlash, DeflatedParamStar as ParamStar, + DeflatedParameters as Parameters, DeflatedRightCurlyBrace as RightCurlyBrace, + DeflatedRightParen as RightParen, DeflatedRightSquareBracket as RightSquareBracket, + DeflatedSet as Set, DeflatedSetComp as SetComp, DeflatedSimpleString as SimpleString, + DeflatedSlice as Slice, DeflatedStarArg as StarArg, + DeflatedStarredDictElement as StarredDictElement, DeflatedStarredElement as StarredElement, + DeflatedString as String, DeflatedSubscript as Subscript, + DeflatedSubscriptElement as SubscriptElement, DeflatedTemplatedString as TemplatedString, + DeflatedTemplatedStringContent as TemplatedStringContent, + DeflatedTemplatedStringExpression as TemplatedStringExpression, + DeflatedTemplatedStringText as TemplatedStringText, DeflatedTuple as Tuple, + DeflatedUnaryOperation as UnaryOperation, DeflatedYield as Yield, + DeflatedYieldValue as YieldValue, + }; + pub use super::module::DeflatedModule as Module; + pub use super::op::{ + DeflatedAssignEqual as AssignEqual, DeflatedAugOp as AugOp, DeflatedBinaryOp as BinaryOp, + DeflatedBitOr as BitOr, DeflatedBooleanOp as BooleanOp, DeflatedColon as Colon, + DeflatedComma as Comma, DeflatedCompOp as CompOp, DeflatedDot as Dot, + DeflatedImportStar as ImportStar, DeflatedSemicolon as 
Semicolon, + DeflatedUnaryOp as UnaryOp, + }; + pub use super::statement::{ + DeflatedAnnAssign as AnnAssign, DeflatedAnnotation as Annotation, DeflatedAsName as AsName, + DeflatedAssert as Assert, DeflatedAssign as Assign, DeflatedAssignTarget as AssignTarget, + DeflatedAssignTargetExpression as AssignTargetExpression, DeflatedAugAssign as AugAssign, + DeflatedBreak as Break, DeflatedClassDef as ClassDef, + DeflatedCompoundStatement as CompoundStatement, DeflatedContinue as Continue, + DeflatedDecorator as Decorator, DeflatedDel as Del, + DeflatedDelTargetExpression as DelTargetExpression, DeflatedElse as Else, + DeflatedExceptHandler as ExceptHandler, DeflatedExceptStarHandler as ExceptStarHandler, + DeflatedExpr as Expr, DeflatedFinally as Finally, DeflatedFor as For, + DeflatedFunctionDef as FunctionDef, DeflatedGlobal as Global, DeflatedIf as If, + DeflatedImport as Import, DeflatedImportAlias as ImportAlias, + DeflatedImportFrom as ImportFrom, DeflatedImportNames as ImportNames, + DeflatedIndentedBlock as IndentedBlock, DeflatedMatch as Match, DeflatedMatchAs as MatchAs, + DeflatedMatchCase as MatchCase, DeflatedMatchClass as MatchClass, + DeflatedMatchKeywordElement as MatchKeywordElement, DeflatedMatchList as MatchList, + DeflatedMatchMapping as MatchMapping, DeflatedMatchMappingElement as MatchMappingElement, + DeflatedMatchOr as MatchOr, DeflatedMatchOrElement as MatchOrElement, + DeflatedMatchPattern as MatchPattern, DeflatedMatchSequence as MatchSequence, + DeflatedMatchSequenceElement as MatchSequenceElement, + DeflatedMatchSingleton as MatchSingleton, DeflatedMatchStar as MatchStar, + DeflatedMatchTuple as MatchTuple, DeflatedMatchValue as MatchValue, + DeflatedNameItem as NameItem, DeflatedNonlocal as Nonlocal, DeflatedOrElse as OrElse, + DeflatedParamSpec as ParamSpec, DeflatedPass as Pass, DeflatedRaise as Raise, + DeflatedReturn as Return, DeflatedSimpleStatementLine as SimpleStatementLine, + DeflatedSimpleStatementSuite as SimpleStatementSuite, + 
DeflatedSmallStatement as SmallStatement, + DeflatedStarrableMatchSequenceElement as StarrableMatchSequenceElement, + DeflatedStatement as Statement, DeflatedSuite as Suite, DeflatedTry as Try, + DeflatedTryStar as TryStar, DeflatedTypeAlias as TypeAlias, DeflatedTypeParam as TypeParam, + DeflatedTypeParameters as TypeParameters, DeflatedTypeVar as TypeVar, + DeflatedTypeVarLike as TypeVarLike, DeflatedTypeVarTuple as TypeVarTuple, + DeflatedWhile as While, DeflatedWith as With, DeflatedWithItem as WithItem, + }; +} diff --git a/native/libcst/src/nodes/module.rs b/native/libcst/src/nodes/module.rs new file mode 100644 index 00000000..ff9a2a73 --- /dev/null +++ b/native/libcst/src/nodes/module.rs @@ -0,0 +1,96 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use std::mem::swap; + +use crate::tokenizer::whitespace_parser::parse_empty_lines; +use crate::tokenizer::Token; +use crate::{ + nodes::{ + codegen::{Codegen, CodegenState}, + statement::*, + whitespace::EmptyLine, + }, + tokenizer::whitespace_parser::Config, +}; +use libcst_derive::cst_node; +#[cfg(feature = "py")] +use libcst_derive::TryIntoPy; + +use super::traits::{Inflate, Result, WithLeadingLines}; + +type TokenRef<'r, 'a> = &'r Token<'a>; + +#[cst_node] +pub struct Module<'a> { + pub body: Vec>, + pub header: Vec>, + pub footer: Vec>, + + pub default_indent: &'a str, + pub default_newline: &'a str, + pub has_trailing_newline: bool, + pub encoding: String, + + pub(crate) eof_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Module<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for h in &self.header { + h.codegen(state); + } + for s in &self.body { + s.codegen(state); + } + for nl in &self.footer { + nl.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedModule<'r, 'a> { + type Inflated = Module<'a>; + fn inflate(self, config: 
&Config<'a>) -> Result { + let default_indent = config.default_indent; + let default_newline = config.default_newline; + let has_trailing_newline = config.has_trailing_newline(); + let mut body = self.body.inflate(config)?; + let mut footer = parse_empty_lines( + config, + &mut (*self.eof_tok).whitespace_before.borrow_mut(), + Some(""), + )?; + let mut header = vec![]; + if let Some(stmt) = body.first_mut() { + swap(stmt.leading_lines(), &mut header); + let mut last_indented = None; + for (num, line) in footer.iter().enumerate() { + if !line.whitespace.0.is_empty() { + last_indented = Some(num); + } else if line.comment.is_some() { + // This is a non-indented comment. Everything from here should belong in the + // footer. + break; + } + } + if let Some(num) = last_indented { + let (_, rest) = footer.split_at(num); + footer = rest.to_vec(); + } + } else { + swap(&mut header, &mut footer); + } + Ok(Self::Inflated { + body, + header, + footer, + default_indent, + default_newline, + has_trailing_newline, + encoding: self.encoding, + }) + } +} diff --git a/native/libcst/src/nodes/op.rs b/native/libcst/src/nodes/op.rs new file mode 100644 index 00000000..3e02483e --- /dev/null +++ b/native/libcst/src/nodes/op.rs @@ -0,0 +1,1430 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use super::{whitespace::ParenthesizableWhitespace, Codegen, CodegenState}; +use crate::{ + nodes::traits::{Inflate, Result}, + tokenizer::{ + whitespace_parser::{parse_parenthesizable_whitespace, parse_simple_whitespace, Config}, + Token, + }, +}; +use libcst_derive::cst_node; +#[cfg(feature = "py")] +use libcst_derive::TryIntoPy; + +type TokenRef<'r, 'a> = &'r Token<'a>; + +#[cst_node] +pub struct Semicolon<'a> { + /// Any space that appears directly before this semicolon. 
+ pub whitespace_before: ParenthesizableWhitespace<'a>, + /// Any space that appears directly after this semicolon. + pub whitespace_after: ParenthesizableWhitespace<'a>, + + #[cfg_attr(feature = "py", skip_py)] + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Semicolon<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token(";"); + self.whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedSemicolon<'r, 'a> { + type Inflated = Semicolon<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = ParenthesizableWhitespace::SimpleWhitespace( + parse_simple_whitespace(config, &mut (*self.tok).whitespace_before.borrow_mut())?, + ); + let whitespace_after = ParenthesizableWhitespace::SimpleWhitespace( + parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?, + ); + Ok(Self::Inflated { + whitespace_before, + whitespace_after, + }) + } +} + +#[cst_node] +pub struct Comma<'a> { + /// Any space that appears directly before this comma. + pub whitespace_before: ParenthesizableWhitespace<'a>, + /// Any space that appears directly after this comma. 
+ pub whitespace_after: ParenthesizableWhitespace<'a>, + + #[cfg_attr(feature = "py", skip_py)] + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Comma<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token(","); + self.whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedComma<'r, 'a> { + type Inflated = Comma<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { + whitespace_before, + whitespace_after, + }) + } +} + +impl<'r, 'a> DeflatedComma<'r, 'a> { + pub fn inflate_before(self, config: &Config<'a>) -> Result> { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = Default::default(); + Ok(Comma { + whitespace_before, + whitespace_after, + }) + } +} + +#[cst_node] +pub struct AssignEqual<'a> { + /// Any space that appears directly before this equal sign. + pub whitespace_before: ParenthesizableWhitespace<'a>, + /// Any space that appears directly after this equal sign. 
+ pub whitespace_after: ParenthesizableWhitespace<'a>, + + #[cfg_attr(feature = "py", skip_py)] + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for AssignEqual<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token("="); + self.whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedAssignEqual<'r, 'a> { + type Inflated = AssignEqual<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { + whitespace_before, + whitespace_after, + }) + } +} + +#[cst_node] +pub struct Dot<'a> { + /// Any space that appears directly before this dot. + pub whitespace_before: ParenthesizableWhitespace<'a>, + /// Any space that appears directly after this dot. 
+ pub whitespace_after: ParenthesizableWhitespace<'a>, + + #[cfg_attr(feature = "py", skip_py)] + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Dot<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token("."); + self.whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedDot<'r, 'a> { + type Inflated = Dot<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let whitespace_before = self.inflate_before(config)?; + let whitespace_after = self.inflate_after(config)?; + Ok(Self::Inflated { + whitespace_before, + whitespace_after, + }) + } +} + +impl<'r, 'a> DeflatedDot<'r, 'a> { + fn inflate_before(&mut self, config: &Config<'a>) -> Result> { + parse_parenthesizable_whitespace(config, &mut (*self.tok).whitespace_before.borrow_mut()) + } + + fn inflate_after(&mut self, config: &Config<'a>) -> Result> { + parse_parenthesizable_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut()) + } +} + +#[cst_node] +pub struct ImportStar {} + +pub(crate) fn make_importstar<'r, 'a>() -> DeflatedImportStar<'r, 'a> { + DeflatedImportStar { + _phantom: Default::default(), + } +} + +impl<'a> Codegen<'a> for ImportStar { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("*"); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedImportStar<'r, 'a> { + type Inflated = ImportStar; + fn inflate(self, _config: &Config<'a>) -> Result { + Ok(ImportStar {}) + } +} + +#[cst_node] +pub enum UnaryOp<'a> { + Plus { + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Minus { + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + BitInvert { + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Not { + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", 
skip_py)] + tok: TokenRef<'a>, + }, +} + +impl<'a> Codegen<'a> for UnaryOp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + let (tok, whitespace_after) = match self { + Self::Plus { + whitespace_after, .. + } => ("+", whitespace_after), + Self::Minus { + whitespace_after, .. + } => ("-", whitespace_after), + Self::BitInvert { + whitespace_after, .. + } => ("~", whitespace_after), + Self::Not { + whitespace_after, .. + } => ("not", whitespace_after), + }; + state.add_token(tok); + whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedUnaryOp<'r, 'a> { + type Inflated = UnaryOp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + Ok(match self { + Self::Plus { tok, .. } => { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Plus { whitespace_after } + } + Self::Minus { tok, .. } => { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Minus { whitespace_after } + } + Self::BitInvert { tok, .. } => { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitInvert { whitespace_after } + } + Self::Not { tok, .. 
} => { + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Not { whitespace_after } + } + }) + } +} + +#[cst_node] +pub enum BooleanOp<'a> { + And { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Or { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, +} + +impl<'a> Codegen<'a> for BooleanOp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + let (tok, ws_bef, ws_aft) = match self { + Self::And { + whitespace_after, + whitespace_before, + .. + } => ("and", whitespace_before, whitespace_after), + Self::Or { + whitespace_after, + whitespace_before, + .. + } => ("or", whitespace_before, whitespace_after), + }; + ws_bef.codegen(state); + state.add_token(tok); + ws_aft.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedBooleanOp<'r, 'a> { + type Inflated = BooleanOp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + Ok(match self { + Self::And { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::And { + whitespace_before, + whitespace_after, + } + } + Self::Or { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Or { + whitespace_before, + whitespace_after, + } + } + }) + } +} + +#[cst_node] +pub enum BinaryOp<'a> { + Add { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Subtract { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Multiply { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Divide { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + FloorDivide { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Modulo { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Power { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + LeftShift { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + RightShift { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + 
BitOr { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + BitAnd { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + BitXor { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + MatrixMultiply { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, +} + +impl<'a> Codegen<'a> for BinaryOp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + let (whitespace_before, whitespace_after) = match self { + Self::Add { + whitespace_before, + whitespace_after, + } + | Self::Subtract { + whitespace_before, + whitespace_after, + } + | Self::Multiply { + whitespace_before, + whitespace_after, + } + | Self::Divide { + whitespace_before, + whitespace_after, + } + | Self::FloorDivide { + whitespace_before, + whitespace_after, + } + | Self::Modulo { + whitespace_before, + whitespace_after, + } + | Self::Power { + whitespace_before, + whitespace_after, + } + | Self::LeftShift { + whitespace_before, + whitespace_after, + } + | Self::RightShift { + whitespace_before, + whitespace_after, + } + | Self::BitOr { + whitespace_before, + whitespace_after, + } + | Self::BitAnd { + whitespace_before, + whitespace_after, + } + | Self::BitXor { + whitespace_before, + whitespace_after, + } + | Self::MatrixMultiply { + whitespace_before, + whitespace_after, + } => (whitespace_before, whitespace_after), + }; + let tok = match self { + BinaryOp::Add { .. } => "+", + BinaryOp::Subtract { .. } => "-", + BinaryOp::Multiply { .. } => "*", + BinaryOp::Divide { .. } => "/", + BinaryOp::FloorDivide { .. } => "//", + BinaryOp::Modulo { .. 
} => "%", + BinaryOp::Power { .. } => "**", + BinaryOp::LeftShift { .. } => "<<", + BinaryOp::RightShift { .. } => ">>", + BinaryOp::BitOr { .. } => "|", + BinaryOp::BitAnd { .. } => "&", + BinaryOp::BitXor { .. } => "^", + BinaryOp::MatrixMultiply { .. } => "@", + }; + whitespace_before.codegen(state); + state.add_token(tok); + whitespace_after.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedBinaryOp<'r, 'a> { + type Inflated = BinaryOp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + Ok(match self { + Self::Add { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Add { + whitespace_before, + whitespace_after, + } + } + Self::Subtract { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Subtract { + whitespace_before, + whitespace_after, + } + } + Self::Multiply { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Multiply { + whitespace_before, + whitespace_after, + } + } + Self::Divide { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Divide { + whitespace_before, + whitespace_after, + } + } + Self::FloorDivide { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::FloorDivide { + whitespace_before, + whitespace_after, + } + } + Self::Modulo { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Modulo { + whitespace_before, + whitespace_after, + } + } + Self::Power { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Power { + whitespace_before, + whitespace_after, + } + } + Self::LeftShift { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::LeftShift { + whitespace_before, + whitespace_after, + } + } + Self::RightShift { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::RightShift { + whitespace_before, + whitespace_after, + } + } + Self::BitOr { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitOr { + whitespace_before, + whitespace_after, + } + } + Self::BitAnd { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitAnd { + whitespace_before, + whitespace_after, + } + } + Self::BitXor { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitXor { + whitespace_before, + whitespace_after, + } + } + Self::MatrixMultiply { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::MatrixMultiply { + whitespace_before, + whitespace_after, + } + } + }) + } +} + +#[cst_node] +pub enum CompOp<'a> { + LessThan { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + GreaterThan { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + LessThanEqual { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + GreaterThanEqual { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + Equal { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + NotEqual { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + In { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + NotIn { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_between: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + not_tok: TokenRef<'a>, + #[cfg_attr(feature = "py", skip_py)] + in_tok: TokenRef<'a>, + }, + Is { + whitespace_before: 
ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + IsNot { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_between: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + is_tok: TokenRef<'a>, + #[cfg_attr(feature = "py", skip_py)] + not_tok: TokenRef<'a>, + }, +} + +impl<'a> Codegen<'a> for CompOp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + let (bef, aft, between) = match self { + Self::LessThan { + whitespace_before, + whitespace_after, + } + | Self::GreaterThan { + whitespace_before, + whitespace_after, + } + | Self::LessThanEqual { + whitespace_before, + whitespace_after, + } + | Self::GreaterThanEqual { + whitespace_before, + whitespace_after, + } + | Self::Equal { + whitespace_before, + whitespace_after, + } + | Self::NotEqual { + whitespace_before, + whitespace_after, + } + | Self::In { + whitespace_before, + whitespace_after, + } + | Self::Is { + whitespace_before, + whitespace_after, + } => (whitespace_before, whitespace_after, None), + Self::IsNot { + whitespace_before, + whitespace_between, + whitespace_after, + } => ( + whitespace_before, + whitespace_after, + Some(whitespace_between), + ), + Self::NotIn { + whitespace_before, + whitespace_between, + whitespace_after, + } => ( + whitespace_before, + whitespace_after, + Some(whitespace_between), + ), + }; + let (first_tok, second_tok) = match self { + CompOp::LessThan { .. } => ("<", None), + CompOp::GreaterThan { .. } => (">", None), + CompOp::LessThanEqual { .. } => ("<=", None), + CompOp::GreaterThanEqual { .. } => (">=", None), + CompOp::Equal { .. } => ("==", None), + CompOp::NotEqual { .. } => ("!=", None), + CompOp::In { .. } => ("in", None), + CompOp::NotIn { .. } => ("not", Some("in")), + CompOp::Is { .. } => ("is", None), + CompOp::IsNot { .. 
} => ("is", Some("not")), + }; + bef.codegen(state); + state.add_token(first_tok); + if let (Some(btw), Some(second_tok)) = (between, second_tok) { + btw.codegen(state); + state.add_token(second_tok); + } + aft.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedCompOp<'r, 'a> { + type Inflated = CompOp<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + Ok(match self { + Self::LessThan { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::LessThan { + whitespace_before, + whitespace_after, + } + } + Self::GreaterThan { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::GreaterThan { + whitespace_before, + whitespace_after, + } + } + Self::LessThanEqual { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::LessThanEqual { + whitespace_before, + whitespace_after, + } + } + Self::GreaterThanEqual { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::GreaterThanEqual { + whitespace_before, + whitespace_after, + } + } + Self::Equal { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Equal { + whitespace_before, + whitespace_after, + } + } + Self::NotEqual { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::NotEqual { + whitespace_before, + whitespace_after, + } + } + Self::In { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::In { + whitespace_before, + whitespace_after, + } + } + Self::Is { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::Is { + whitespace_before, + whitespace_after, + } + } + Self::IsNot { + is_tok, not_tok, .. + } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*is_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_between = parse_parenthesizable_whitespace( + config, + &mut (*is_tok).whitespace_after.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*not_tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::IsNot { + whitespace_before, + whitespace_between, + whitespace_after, + } + } + Self::NotIn { + not_tok, in_tok, .. 
+ } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*not_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_between = parse_parenthesizable_whitespace( + config, + &mut (*not_tok).whitespace_after.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*in_tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::NotIn { + whitespace_before, + whitespace_between, + whitespace_after, + } + } + }) + } +} + +#[cst_node] +pub struct Colon<'a> { + pub whitespace_before: ParenthesizableWhitespace<'a>, + pub whitespace_after: ParenthesizableWhitespace<'a>, + + #[cfg_attr(feature = "py", skip_py)] + pub(crate) tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedColon<'r, 'a> { + type Inflated = Colon<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { + whitespace_before, + whitespace_after, + }) + } +} + +impl<'a> Codegen<'a> for Colon<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token(":"); + self.whitespace_after.codegen(state); + } +} + +#[cst_node] +pub enum AugOp<'a> { + AddAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + SubtractAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + MultiplyAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + 
MatrixMultiplyAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + DivideAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + ModuloAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + BitAndAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + BitOrAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + BitXorAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + LeftShiftAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + RightShiftAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + PowerAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, + FloorDivideAssign { + whitespace_before: ParenthesizableWhitespace<'a>, + whitespace_after: ParenthesizableWhitespace<'a>, + #[cfg_attr(feature = "py", skip_py)] + tok: TokenRef<'a>, + }, +} + +impl<'r, 'a> Inflate<'a> for DeflatedAugOp<'r, 'a> { + type Inflated = AugOp<'a>; + fn inflate(self, config: &Config<'a>) -> 
Result { + Ok(match self { + Self::AddAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::AddAssign { + whitespace_before, + whitespace_after, + } + } + Self::SubtractAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::SubtractAssign { + whitespace_before, + whitespace_after, + } + } + Self::MultiplyAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::MultiplyAssign { + whitespace_before, + whitespace_after, + } + } + Self::MatrixMultiplyAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::MatrixMultiplyAssign { + whitespace_before, + whitespace_after, + } + } + Self::DivideAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::DivideAssign { + whitespace_before, + whitespace_after, + } + } + Self::ModuloAssign { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::ModuloAssign { + whitespace_before, + whitespace_after, + } + } + Self::BitAndAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitAndAssign { + whitespace_before, + whitespace_after, + } + } + Self::BitOrAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitOrAssign { + whitespace_before, + whitespace_after, + } + } + Self::BitXorAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::BitXorAssign { + whitespace_before, + whitespace_after, + } + } + Self::LeftShiftAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::LeftShiftAssign { + whitespace_before, + whitespace_after, + } + } + Self::RightShiftAssign { tok, .. 
} => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::RightShiftAssign { + whitespace_before, + whitespace_after, + } + } + Self::PowerAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::PowerAssign { + whitespace_before, + whitespace_after, + } + } + Self::FloorDivideAssign { tok, .. } => { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*tok).whitespace_after.borrow_mut(), + )?; + Self::Inflated::FloorDivideAssign { + whitespace_before, + whitespace_after, + } + } + }) + } +} + +impl<'a> Codegen<'a> for AugOp<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + let (tok, bef, aft) = match self { + Self::AddAssign { + whitespace_before, + whitespace_after, + .. + } => ("+=", whitespace_before, whitespace_after), + Self::SubtractAssign { + whitespace_before, + whitespace_after, + .. + } => ("-=", whitespace_before, whitespace_after), + Self::MultiplyAssign { + whitespace_before, + whitespace_after, + .. + } => ("*=", whitespace_before, whitespace_after), + Self::MatrixMultiplyAssign { + whitespace_before, + whitespace_after, + .. + } => ("@=", whitespace_before, whitespace_after), + Self::DivideAssign { + whitespace_before, + whitespace_after, + .. + } => ("/=", whitespace_before, whitespace_after), + Self::ModuloAssign { + whitespace_before, + whitespace_after, + .. 
+ } => ("%=", whitespace_before, whitespace_after), + Self::BitAndAssign { + whitespace_before, + whitespace_after, + .. + } => ("&=", whitespace_before, whitespace_after), + Self::BitOrAssign { + whitespace_before, + whitespace_after, + .. + } => ("|=", whitespace_before, whitespace_after), + Self::BitXorAssign { + whitespace_before, + whitespace_after, + .. + } => ("^=", whitespace_before, whitespace_after), + Self::LeftShiftAssign { + whitespace_before, + whitespace_after, + .. + } => ("<<=", whitespace_before, whitespace_after), + Self::RightShiftAssign { + whitespace_before, + whitespace_after, + .. + } => (">>=", whitespace_before, whitespace_after), + Self::PowerAssign { + whitespace_before, + whitespace_after, + .. + } => ("**=", whitespace_before, whitespace_after), + Self::FloorDivideAssign { + whitespace_before, + whitespace_after, + .. + } => ("//=", whitespace_before, whitespace_after), + }; + bef.codegen(state); + state.add_token(tok); + aft.codegen(state); + } +} + +#[cst_node] +pub struct BitOr<'a> { + pub whitespace_before: ParenthesizableWhitespace<'a>, + pub whitespace_after: ParenthesizableWhitespace<'a>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedBitOr<'r, 'a> { + type Inflated = BitOr<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after = parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + Ok(Self::Inflated { + whitespace_before, + whitespace_after, + }) + } +} + +impl<'a> Codegen<'a> for BitOr<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before.codegen(state); + state.add_token("|"); + self.whitespace_after.codegen(state); + } +} diff --git a/native/libcst/src/nodes/parser_config.rs b/native/libcst/src/nodes/parser_config.rs new file mode 100644 index 00000000..e274d8df --- 
/dev/null +++ b/native/libcst/src/nodes/parser_config.rs @@ -0,0 +1,138 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use pyo3::exceptions::PyIndexError; +use pyo3::prelude::*; +use pyo3::types::{IntoPyDict, PyDict, PySequence, PyString}; +use pyo3::wrap_pyfunction; + +use crate::py_cached::PyCached; + +#[pyclass(subclass, module = "libcst_native.parser_config")] +#[text_signature = "(*, lines, default_newline)"] +pub struct BaseWhitespaceParserConfig { + pub lines: PyCached>, + pub default_newline: PyCached, +} + +#[pymethods] +impl BaseWhitespaceParserConfig { + #[new] + fn new(lines: &PySequence, default_newline: &PyString) -> PyResult { + // These fields will get initialized when ParserConfig.__init__ (our subclass) runs + Ok(Self { + lines: lines.extract()?, + default_newline: default_newline.extract()?, + }) + } + + #[getter] + fn get_lines(&self, py: Python) -> Py { + self.lines.to_object(py) + } + + #[getter] + fn get_default_newline(&self, py: Python) -> Py { + self.default_newline.to_object(py) + } +} + +impl BaseWhitespaceParserConfig { + /// Equivalent to `config.lines.unwrap()[line_number - 1]`, but it return a PyErr when we get + /// an index that's out of range, instead of panicing. + pub fn get_line(&self, line_number: usize) -> PyResult<&str> { + let err_fn = + || PyIndexError::new_err(format!("line number of {} is out of range", line_number)); + self.lines + .get(line_number.checked_sub(1).ok_or_else(err_fn)?) + .map(|l| &l[..]) + .ok_or_else(err_fn) + } + + /// Equivalent to `config.get_line(line_number)[column_index..]`, but it return a PyErr when + /// we get an column index that's out of range, instead of panicing. + pub fn get_line_after_column(&self, line_number: usize, column_index: usize) -> PyResult<&str> { + self.get_line(line_number)? + .get(column_index..) 
+ .ok_or_else(|| { + PyIndexError::new_err(format!("column index of {} is out of range", column_index)) + }) + } +} + +// These fields are private and Py, since we don't currently care about using them from +// within rust. +#[pyclass(extends=BaseWhitespaceParserConfig, module="libcst_native.parser_config")] +#[text_signature = "(*, lines, encoding, default_indent, default_newline, has_trailing_newline, version, future_imports)"] +pub struct ParserConfig { + // lines is inherited + #[pyo3(get)] + encoding: Py, + #[pyo3(get)] + default_indent: Py, + // default_newline is inherited + #[pyo3(get)] + has_trailing_newline: Py, + #[pyo3(get)] + version: Py, + #[pyo3(get)] + future_imports: Py, +} + +#[pymethods] +impl ParserConfig { + #[new] + fn new( + lines: &PySequence, + encoding: Py, + default_indent: Py, + default_newline: &PyString, + has_trailing_newline: Py, + version: Py, + future_imports: Py, + ) -> PyResult<(Self, BaseWhitespaceParserConfig)> { + Ok(( + Self { + encoding, + default_indent, + has_trailing_newline, + version, + future_imports, + }, + BaseWhitespaceParserConfig::new(lines, default_newline)?, + )) + } +} + +/// An internal helper function used by python unit tests to compare configs. 
+#[pyfunction]
+fn parser_config_asdict<'py>(py: Python<'py>, config: PyRef<'py, ParserConfig>) -> &'py PyDict {
+    let super_config: &BaseWhitespaceParserConfig = config.as_ref();
+    vec![
+        ("lines", super_config.lines.to_object(py)),
+        ("encoding", config.encoding.clone_ref(py)),
+        ("default_indent", config.default_indent.clone_ref(py)),
+        (
+            "default_newline",
+            super_config.default_newline.to_object(py),
+        ),
+        (
+            "has_trailing_newline",
+            config.has_trailing_newline.clone_ref(py),
+        ),
+        ("version", config.version.clone_ref(py)),
+        ("future_imports", config.future_imports.clone_ref(py)),
+    ]
+    .into_py_dict(py)
+    // NOTE: IntoPyDict::into_py_dict returns &PyDict directly; there is no Result to unwrap here.
+}
+
+pub fn init_module(_py: Python, m: &PyModule) -> PyResult<()> {
+    m.add_class::<BaseWhitespaceParserConfig>()?;
+    m.add_class::<ParserConfig>()?;
+    m.add_function(wrap_pyfunction!(parser_config_asdict, m)?)
+        .unwrap();
+    Ok(())
+}
diff --git a/native/libcst/src/nodes/py_cached.rs b/native/libcst/src/nodes/py_cached.rs
new file mode 100644
index 00000000..307082da
--- /dev/null
+++ b/native/libcst/src/nodes/py_cached.rs
@@ -0,0 +1,76 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+//
+// This source code is licensed under the MIT license found in the
+// LICENSE file in the root directory of this source tree.
+
+use pyo3::prelude::*;
+use std::convert::AsRef;
+use std::ops::Deref;
+
+/// An immutable wrapper around a rust type T and its PyObject equivalent. Caches the conversion
+/// to and from the PyObject.
+pub struct PyCached<T> {
+    native: T,
+    py_object: PyObject,
+}
+
+impl<T> PyCached<T>
+where
+    T: ToPyObject,
+{
+    pub fn new(py: Python, native: T) -> Self {
+        Self {
+            py_object: native.to_object(py),
+            native,
+        }
+    }
+}
+
+impl<'source, T> FromPyObject<'source> for PyCached<T>
+where
+    T: FromPyObject<'source>,
+{
+    fn extract(ob: &'source PyAny) -> PyResult<Self> {
+        // This pyo3 generation has no Python::attach; acquire the GIL with with_gil.
+        Python::with_gil(|py| {
+            Ok(PyCached {
+                native: ob.extract()?,
+                py_object: ob.to_object(py),
+            })
+        })
+    }
+}
+
+impl<T> IntoPy<PyObject> for PyCached<T> {
+    fn into_py(self, _py: Python) -> PyObject {
+        self.py_object
+    }
+}
+
+impl<T> ToPyObject for PyCached<T> {
+    fn to_object(&self, py: Python) -> PyObject {
+        self.py_object.clone_ref(py)
+    }
+}
+
+impl<T> AsRef<T> for PyCached<T> {
+    fn as_ref(&self) -> &T {
+        &self.native
+    }
+}
+
+impl<T> Deref for PyCached<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.native
+    }
+}
+
+impl<T> From<T> for PyCached<T>
+where
+    T: ToPyObject,
+{
+    fn from(val: T) -> Self {
+        Python::with_gil(|py| Self::new(py, val))
+    }
+}
diff --git a/native/libcst/src/nodes/statement.rs b/native/libcst/src/nodes/statement.rs
new file mode 100644
index 00000000..d40ef340
--- /dev/null
+++ b/native/libcst/src/nodes/statement.rs
@@ -0,0 +1,3644 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+//
+// This source code is licensed under the MIT license found in the
+// LICENSE file in the root directory of this source tree.
+ +use std::mem::swap; + +use super::{ + inflate_helpers::adjust_parameters_trailing_whitespace, Attribute, Codegen, CodegenState, + Comma, Dot, EmptyLine, Expression, From, ImportStar, LeftParen, List, Name, NameOrAttribute, + Parameters, ParenthesizableWhitespace, RightParen, Semicolon, SimpleWhitespace, StarredElement, + Subscript, TrailingWhitespace, Tuple, +}; +use crate::{ + nodes::{ + expression::*, + op::*, + traits::{ + Inflate, ParenthesizedDeflatedNode, ParenthesizedNode, Result, WithComma, + WithLeadingLines, + }, + }, + tokenizer::{ + whitespace_parser::{ + parse_empty_lines, parse_parenthesizable_whitespace, parse_simple_whitespace, + parse_trailing_whitespace, Config, + }, + Token, + }, + LeftCurlyBrace, LeftSquareBracket, RightCurlyBrace, RightSquareBracket, +}; +#[cfg(feature = "py")] +use libcst_derive::TryIntoPy; +use libcst_derive::{cst_node, Codegen, Inflate, ParenthesizedDeflatedNode, ParenthesizedNode}; + +type TokenRef<'r, 'a> = &'r Token<'a>; + +#[allow(clippy::large_enum_variant)] +#[cst_node(Inflate, Codegen)] +pub enum Statement<'a> { + Simple(SimpleStatementLine<'a>), + Compound(CompoundStatement<'a>), +} + +impl<'a> WithLeadingLines<'a> for Statement<'a> { + fn leading_lines(&mut self) -> &mut Vec> { + match self { + Self::Simple(s) => &mut s.leading_lines, + Self::Compound(c) => c.leading_lines(), + } + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node(Inflate, Codegen)] +pub enum CompoundStatement<'a> { + FunctionDef(FunctionDef<'a>), + If(If<'a>), + For(For<'a>), + While(While<'a>), + ClassDef(ClassDef<'a>), + Try(Try<'a>), + TryStar(TryStar<'a>), + With(With<'a>), + Match(Match<'a>), +} + +impl<'a> WithLeadingLines<'a> for CompoundStatement<'a> { + fn leading_lines(&mut self) -> &mut Vec> { + match self { + Self::FunctionDef(f) => &mut f.leading_lines, + Self::If(f) => &mut f.leading_lines, + Self::For(f) => &mut f.leading_lines, + Self::While(f) => &mut f.leading_lines, + Self::ClassDef(c) => &mut c.leading_lines, + 
Self::Try(t) => &mut t.leading_lines, + Self::TryStar(t) => &mut t.leading_lines, + Self::With(w) => &mut w.leading_lines, + Self::Match(m) => &mut m.leading_lines, + } + } +} + +#[cst_node(Inflate, Codegen)] +pub enum Suite<'a> { + IndentedBlock(IndentedBlock<'a>), + SimpleStatementSuite(SimpleStatementSuite<'a>), +} + +#[cst_node] +pub struct IndentedBlock<'a> { + /// Sequence of statements belonging to this indented block. + pub body: Vec>, + /// Any optional trailing comment and the final ``NEWLINE`` at the end of the line. + pub header: TrailingWhitespace<'a>, + /// A string represents a specific indentation. A ``None`` value uses the modules's + /// default indentation. This is included because indentation is allowed to be + /// inconsistent across a file, just not ambiguously. + pub indent: Option<&'a str>, + /// Any trailing comments or lines after the dedent that are owned by this indented + /// block. Statements own preceeding and same-line trailing comments, but not + /// trailing lines, so it falls on :class:`IndentedBlock` to own it. In the case + /// that a statement follows an :class:`IndentedBlock`, that statement will own the + /// comments and lines that are at the same indent as the statement, and this + /// :class:`IndentedBlock` will own the comments and lines that are indented + /// further. + pub footer: Vec>, + + pub(crate) newline_tok: TokenRef<'a>, + pub(crate) indent_tok: TokenRef<'a>, + pub(crate) dedent_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for IndentedBlock<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.header.codegen(state); + + let indent = match self.indent { + Some(i) => i, + None => state.default_indent, + }; + state.indent(indent); + + if self.body.is_empty() { + // Empty indented blocks are not syntactically valid in Python unless they + // contain a 'pass' statement, so add one here. 
+ state.add_indent(); + state.add_token("pass"); + state.add_token(state.default_newline); + } else { + for stmt in &self.body { + // IndentedBlock is responsible for adjusting the current indentation + // level, but its children are responsible for actually adding that + // indentation to the token list. + stmt.codegen(state); + } + } + + for f in &self.footer { + f.codegen(state); + } + + state.dedent(); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedIndentedBlock<'r, 'a> { + type Inflated = IndentedBlock<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let body = self.body.inflate(config)?; + // We want to be able to only keep comments in the footer that are actually for + // this IndentedBlock. We do so by assuming that lines which are indented to the + // same level as the block itself are comments that go at the footer of the + // block. Comments that are indented to less than this indent are assumed to + // belong to the next line of code. We override the indent here because the + // dedent node's absolute indent is the resulting indentation after the dedent + // is performed. Its this way because the whitespace state for both the dedent's + // whitespace_after and the next BaseCompoundStatement's whitespace_before is + // shared. This allows us to partially parse here and parse the rest of the + // whitespace and comments on the next line, effectively making sure that + // comments are attached to the correct node. 
+ let footer = parse_empty_lines( + config, + &mut (*self.dedent_tok).whitespace_after.borrow_mut(), + Some(self.indent_tok.whitespace_before.borrow().absolute_indent), + )?; + let header = parse_trailing_whitespace( + config, + &mut (*self.newline_tok).whitespace_before.borrow_mut(), + )?; + let mut indent = self.indent_tok.relative_indent; + if indent == Some(config.default_indent) { + indent = None; + } + Ok(Self::Inflated { + body, + header, + indent, + footer, + }) + } +} + +#[cst_node] +pub struct SimpleStatementSuite<'a> { + /// Sequence of small statements. All but the last statement are required to have + /// a semicolon. + pub body: Vec>, + + /// The whitespace between the colon in the parent statement and the body. + pub leading_whitespace: SimpleWhitespace<'a>, + /// Any optional trailing comment and the final ``NEWLINE`` at the end of the line. + pub trailing_whitespace: TrailingWhitespace<'a>, + + pub(crate) first_tok: TokenRef<'a>, + pub(crate) newline_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedSimpleStatementSuite<'r, 'a> { + type Inflated = SimpleStatementSuite<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_whitespace = parse_simple_whitespace( + config, + &mut (*self.first_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + let trailing_whitespace = parse_trailing_whitespace( + config, + &mut (*self.newline_tok).whitespace_before.borrow_mut(), + )?; + Ok(Self::Inflated { + body, + leading_whitespace, + trailing_whitespace, + }) + } +} + +fn _simple_statement_codegen<'a>( + body: &[SmallStatement<'a>], + trailing_whitespace: &TrailingWhitespace<'a>, + state: &mut CodegenState<'a>, +) { + for stmt in body { + stmt.codegen(state); + // TODO: semicolon + } + if body.is_empty() { + // Empty simple statement blocks are not syntactically valid in Python + // unless they contain a 'pass' statement, so add one here. 
+ state.add_token("pass") + } + trailing_whitespace.codegen(state); +} + +impl<'a> Codegen<'a> for SimpleStatementSuite<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.leading_whitespace.codegen(state); + _simple_statement_codegen(&self.body, &self.trailing_whitespace, state); + } +} + +#[cst_node] +pub struct SimpleStatementLine<'a> { + /// Sequence of small statements. All but the last statement are required to have + /// a semicolon. + pub body: Vec>, + + /// Sequence of empty lines appearing before this simple statement line. + pub leading_lines: Vec>, + /// Any optional trailing comment and the final ``NEWLINE`` at the end of the line. + pub trailing_whitespace: TrailingWhitespace<'a>, + + pub(crate) first_tok: TokenRef<'a>, + pub(crate) newline_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for SimpleStatementLine<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for line in &self.leading_lines { + line.codegen(state); + } + state.add_indent(); + _simple_statement_codegen(&self.body, &self.trailing_whitespace, state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedSimpleStatementLine<'r, 'a> { + type Inflated = SimpleStatementLine<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.first_tok).whitespace_before.borrow_mut(), + None, + )?; + let body = self.body.inflate(config)?; + let trailing_whitespace = parse_trailing_whitespace( + config, + &mut (*self.newline_tok).whitespace_before.borrow_mut(), + )?; + Ok(Self::Inflated { + body, + leading_lines, + trailing_whitespace, + }) + } +} + +#[allow(dead_code, clippy::large_enum_variant)] +#[cst_node(Codegen, Inflate)] +pub enum SmallStatement<'a> { + Pass(Pass<'a>), + Break(Break<'a>), + Continue(Continue<'a>), + Return(Return<'a>), + Expr(Expr<'a>), + Assert(Assert<'a>), + Import(Import<'a>), + ImportFrom(ImportFrom<'a>), + Assign(Assign<'a>), + AnnAssign(AnnAssign<'a>), + Raise(Raise<'a>), + Global(Global<'a>), + 
Nonlocal(Nonlocal<'a>), + AugAssign(AugAssign<'a>), + Del(Del<'a>), + TypeAlias(TypeAlias<'a>), +} + +impl<'r, 'a> DeflatedSmallStatement<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + match self { + Self::Pass(p) => Self::Pass(p.with_semicolon(semicolon)), + Self::Break(p) => Self::Break(p.with_semicolon(semicolon)), + Self::Continue(p) => Self::Continue(p.with_semicolon(semicolon)), + Self::Expr(p) => Self::Expr(p.with_semicolon(semicolon)), + Self::Import(i) => Self::Import(i.with_semicolon(semicolon)), + Self::ImportFrom(i) => Self::ImportFrom(i.with_semicolon(semicolon)), + Self::Assign(a) => Self::Assign(a.with_semicolon(semicolon)), + Self::AnnAssign(a) => Self::AnnAssign(a.with_semicolon(semicolon)), + Self::Return(r) => Self::Return(r.with_semicolon(semicolon)), + Self::Assert(a) => Self::Assert(a.with_semicolon(semicolon)), + Self::Raise(r) => Self::Raise(r.with_semicolon(semicolon)), + Self::Global(g) => Self::Global(g.with_semicolon(semicolon)), + Self::Nonlocal(l) => Self::Nonlocal(l.with_semicolon(semicolon)), + Self::AugAssign(a) => Self::AugAssign(a.with_semicolon(semicolon)), + Self::Del(d) => Self::Del(d.with_semicolon(semicolon)), + Self::TypeAlias(t) => Self::TypeAlias(t.with_semicolon(semicolon)), + } + } +} + +#[cst_node] +pub struct Pass<'a> { + pub semicolon: Option>, +} +impl<'r, 'a> DeflatedPass<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon } + } +} +impl<'a> Codegen<'a> for Pass<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("pass"); + self.semicolon.codegen(state); + } +} +impl<'r, 'a> Inflate<'a> for DeflatedPass<'r, 'a> { + type Inflated = Pass<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { semicolon }) + } +} + +#[cst_node] +pub struct Break<'a> { + pub semicolon: Option>, +} +impl<'r, 'a> DeflatedBreak<'r, 'a> { + pub fn with_semicolon(self, semicolon: 
Option>) -> Self { + Self { semicolon } + } +} +impl<'a> Codegen<'a> for Break<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("break"); + self.semicolon.codegen(state); + } +} +impl<'r, 'a> Inflate<'a> for DeflatedBreak<'r, 'a> { + type Inflated = Break<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { semicolon }) + } +} + +#[cst_node] +pub struct Continue<'a> { + pub semicolon: Option>, +} +impl<'r, 'a> DeflatedContinue<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon } + } +} +impl<'a> Codegen<'a> for Continue<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("continue"); + self.semicolon.codegen(state); + } +} +impl<'r, 'a> Inflate<'a> for DeflatedContinue<'r, 'a> { + type Inflated = Continue<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { semicolon }) + } +} + +#[cst_node] +pub struct Expr<'a> { + pub value: Expression<'a>, + pub semicolon: Option>, +} +impl<'r, 'a> DeflatedExpr<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} +impl<'a> Codegen<'a> for Expr<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.value.codegen(state); + self.semicolon.codegen(state); + } +} +impl<'r, 'a> Inflate<'a> for DeflatedExpr<'r, 'a> { + type Inflated = Expr<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let value = self.value.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { value, semicolon }) + } +} + +#[cst_node] +pub struct Assign<'a> { + pub targets: Vec>, + pub value: Expression<'a>, + pub semicolon: Option>, +} + +impl<'a> Codegen<'a> for Assign<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for target in &self.targets { + target.codegen(state); + } + self.value.codegen(state); + 
if let Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedAssign<'r, 'a> { + type Inflated = Assign<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let targets = self.targets.inflate(config)?; + let value = self.value.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + targets, + value, + semicolon, + }) + } +} + +impl<'r, 'a> DeflatedAssign<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct AssignTarget<'a> { + pub target: AssignTargetExpression<'a>, + pub whitespace_before_equal: SimpleWhitespace<'a>, + pub whitespace_after_equal: SimpleWhitespace<'a>, + + pub(crate) equal_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for AssignTarget<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.target.codegen(state); + self.whitespace_before_equal.codegen(state); + state.add_token("="); + self.whitespace_after_equal.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedAssignTarget<'r, 'a> { + type Inflated = AssignTarget<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let target = self.target.inflate(config)?; + let whitespace_before_equal = parse_simple_whitespace( + config, + &mut (*self.equal_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after_equal = + parse_simple_whitespace(config, &mut (*self.equal_tok).whitespace_after.borrow_mut())?; + Ok(Self::Inflated { + target, + whitespace_before_equal, + whitespace_after_equal, + }) + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node(Codegen, ParenthesizedNode, Inflate)] +pub enum AssignTargetExpression<'a> { + Name(Box>), + Attribute(Box>), + StarredElement(Box>), + Tuple(Box>), + List(Box>), + Subscript(Box>), +} + +#[cst_node] +pub struct Import<'a> { + pub names: Vec>, + pub semicolon: Option>, + pub whitespace_after_import: SimpleWhitespace<'a>, + + pub(crate) import_tok: 
TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Import<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("import"); + self.whitespace_after_import.codegen(state); + for (i, name) in self.names.iter().enumerate() { + name.codegen(state); + if name.comma.is_none() && i < self.names.len() - 1 { + state.add_token(", "); + } + } + if let Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedImport<'r, 'a> { + type Inflated = Import<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_import = parse_simple_whitespace( + config, + &mut (*self.import_tok).whitespace_after.borrow_mut(), + )?; + let names = self.names.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + names, + semicolon, + whitespace_after_import, + }) + } +} + +impl<'r, 'a> DeflatedImport<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct ImportFrom<'a> { + #[cfg_attr(feature = "py", no_py_default)] + pub module: Option>, + pub names: ImportNames<'a>, + pub relative: Vec>, + pub lpar: Option>, + pub rpar: Option>, + pub semicolon: Option>, + pub whitespace_after_from: SimpleWhitespace<'a>, + pub whitespace_before_import: SimpleWhitespace<'a>, + pub whitespace_after_import: SimpleWhitespace<'a>, + + pub(crate) from_tok: TokenRef<'a>, + pub(crate) import_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for ImportFrom<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("from"); + self.whitespace_after_from.codegen(state); + for dot in &self.relative { + dot.codegen(state); + } + if let Some(module) = &self.module { + module.codegen(state); + } + self.whitespace_before_import.codegen(state); + state.add_token("import"); + self.whitespace_after_import.codegen(state); + if let Some(lpar) = &self.lpar { + lpar.codegen(state); + } + self.names.codegen(state); + if let 
Some(rpar) = &self.rpar { + rpar.codegen(state); + } + + if let Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedImportFrom<'r, 'a> { + type Inflated = ImportFrom<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_from = + parse_simple_whitespace(config, &mut (*self.from_tok).whitespace_after.borrow_mut())?; + + let module = self.module.inflate(config)?; + + let whitespace_after_import = parse_simple_whitespace( + config, + &mut (*self.import_tok).whitespace_after.borrow_mut(), + )?; + + let mut relative = inflate_dots(self.relative, config)?; + let mut whitespace_before_import = Default::default(); + + if !relative.is_empty() && module.is_none() { + // For relative-only imports relocate the space after the final dot to be owned + // by the import token. + if let Some(Dot { + whitespace_after: ParenthesizableWhitespace::SimpleWhitespace(dot_ws), + .. + }) = relative.last_mut() + { + swap(dot_ws, &mut whitespace_before_import); + } + } else { + whitespace_before_import = parse_simple_whitespace( + config, + &mut (*self.import_tok).whitespace_before.borrow_mut(), + )?; + } + + let lpar = self.lpar.inflate(config)?; + let names = self.names.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + + let semicolon = self.semicolon.inflate(config)?; + + Ok(Self::Inflated { + module, + names, + relative, + lpar, + rpar, + semicolon, + whitespace_after_from, + whitespace_before_import, + whitespace_after_import, + }) + } +} + +fn inflate_dots<'r, 'a>( + dots: Vec>, + config: &Config<'a>, +) -> Result>> { + let mut ret: Vec> = vec![]; + let mut last_tok: Option> = None; + for dot in dots { + if let Some(last_tokref) = &last_tok { + // Consecutive dots having the same Token can only happen if `...` was + // parsed as a single ELLIPSIS token. 
In this case the token's + // whitespace_before belongs to the first dot, but the whitespace_after is + // moved to the 3rd dot (by swapping it twice) + if last_tokref.start_pos == dot.tok.start_pos { + let mut subsequent_dot = Dot { + whitespace_before: Default::default(), + whitespace_after: Default::default(), + }; + swap( + &mut ret.last_mut().unwrap().whitespace_after, + &mut subsequent_dot.whitespace_after, + ); + ret.push(subsequent_dot); + continue; + } + } + last_tok = Some(dot.tok); + ret.push(dot.inflate(config)?); + } + Ok(ret) +} + +impl<'r, 'a> DeflatedImportFrom<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct ImportAlias<'a> { + pub name: NameOrAttribute<'a>, + pub asname: Option>, + pub comma: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedImportAlias<'r, 'a> { + type Inflated = ImportAlias<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let name = self.name.inflate(config)?; + let asname = self.asname.inflate(config)?; + let comma = self.comma.inflate(config)?; + Ok(Self::Inflated { + name, + asname, + comma, + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedImportAlias<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + let comma = Some(comma); + Self { comma, ..self } + } +} + +impl<'a> Codegen<'a> for ImportAlias<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.name.codegen(state); + if let Some(asname) = &self.asname { + asname.codegen(state); + } + if let Some(comma) = &self.comma { + comma.codegen(state); + } + } +} + +#[cst_node] +pub struct AsName<'a> { + pub name: AssignTargetExpression<'a>, + pub whitespace_before_as: ParenthesizableWhitespace<'a>, + pub whitespace_after_as: ParenthesizableWhitespace<'a>, + + pub(crate) as_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for AsName<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace_before_as.codegen(state); + 
state.add_token("as"); + self.whitespace_after_as.codegen(state); + self.name.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedAsName<'r, 'a> { + type Inflated = AsName<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before_as = parse_parenthesizable_whitespace( + config, + &mut (*self.as_tok).whitespace_before.borrow_mut(), + )?; + let whitespace_after_as = parse_parenthesizable_whitespace( + config, + &mut (*self.as_tok).whitespace_after.borrow_mut(), + )?; + let name = self.name.inflate(config)?; + Ok(Self::Inflated { + name, + whitespace_before_as, + whitespace_after_as, + }) + } +} + +#[cst_node(Inflate)] +pub enum ImportNames<'a> { + Star(ImportStar), + Aliases(Vec>), +} + +impl<'a> Codegen<'a> for ImportNames<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + match self { + Self::Star(s) => s.codegen(state), + Self::Aliases(aliases) => { + for (i, alias) in aliases.iter().enumerate() { + alias.codegen(state); + if alias.comma.is_none() && i < aliases.len() - 1 { + state.add_token(", "); + } + } + } + } + } +} + +#[cst_node] +pub struct FunctionDef<'a> { + pub name: Name<'a>, + pub type_parameters: Option>, + pub params: Parameters<'a>, + pub body: Suite<'a>, + pub decorators: Vec>, + pub returns: Option>, + pub asynchronous: Option>, + pub leading_lines: Vec>, + pub lines_after_decorators: Vec>, + pub whitespace_after_def: SimpleWhitespace<'a>, + pub whitespace_after_name: SimpleWhitespace<'a>, + pub whitespace_after_type_parameters: SimpleWhitespace<'a>, + pub whitespace_before_params: ParenthesizableWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) async_tok: Option>, + pub(crate) def_tok: TokenRef<'a>, + pub(crate) open_paren_tok: TokenRef<'a>, + pub(crate) close_paren_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'r, 'a> DeflatedFunctionDef<'r, 'a> { + pub fn with_decorators(self, decorators: Vec>) -> Self { + Self { decorators, ..self } + } +} + 
+impl<'a> Codegen<'a> for FunctionDef<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for l in &self.leading_lines { + l.codegen(state); + } + for dec in self.decorators.iter() { + dec.codegen(state); + } + for l in &self.lines_after_decorators { + l.codegen(state); + } + state.add_indent(); + + if let Some(asy) = &self.asynchronous { + asy.codegen(state); + } + state.add_token("def"); + self.whitespace_after_def.codegen(state); + self.name.codegen(state); + self.whitespace_after_name.codegen(state); + + if let Some(tp) = &self.type_parameters { + tp.codegen(state); + self.whitespace_after_type_parameters.codegen(state); + } + + state.add_token("("); + self.whitespace_before_params.codegen(state); + self.params.codegen(state); + state.add_token(")"); + + if let Some(ann) = &self.returns { + ann.codegen(state, "->"); + } + + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedFunctionDef<'r, 'a> { + type Inflated = FunctionDef<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let mut decorators = self.decorators.inflate(config)?; + let (asynchronous, leading_lines) = if let Some(asy) = self.async_tok.as_mut() { + let whitespace_after = + parse_parenthesizable_whitespace(config, &mut asy.whitespace_after.borrow_mut())?; + ( + Some(Asynchronous { whitespace_after }), + Some(parse_empty_lines( + config, + &mut asy.whitespace_before.borrow_mut(), + None, + )?), + ) + } else { + (None, None) + }; + + let mut leading_lines = if let Some(ll) = leading_lines { + ll + } else { + parse_empty_lines( + config, + &mut (*self.def_tok).whitespace_before.borrow_mut(), + None, + )? 
+ }; + + let mut lines_after_decorators = Default::default(); + + if let Some(dec) = decorators.first_mut() { + swap(&mut lines_after_decorators, &mut leading_lines); + swap(&mut dec.leading_lines, &mut leading_lines); + } + + let whitespace_after_def = + parse_simple_whitespace(config, &mut (*self.def_tok).whitespace_after.borrow_mut())?; + + let name = self.name.inflate(config)?; + + let whitespace_after_name; + let mut type_parameters = Default::default(); + let mut whitespace_after_type_parameters = Default::default(); + + if let Some(tp) = self.type_parameters { + let rbracket_tok = tp.rbracket.tok.clone(); + whitespace_after_name = parse_simple_whitespace( + config, + &mut tp.lbracket.tok.whitespace_before.borrow_mut(), + )?; + type_parameters = Some(tp.inflate(config)?); + whitespace_after_type_parameters = + parse_simple_whitespace(config, &mut rbracket_tok.whitespace_after.borrow_mut())?; + } else { + whitespace_after_name = parse_simple_whitespace( + config, + &mut self.open_paren_tok.whitespace_before.borrow_mut(), + )?; + } + + let whitespace_before_params = parse_parenthesizable_whitespace( + config, + &mut (*self.open_paren_tok).whitespace_after.borrow_mut(), + )?; + let mut params = self.params.inflate(config)?; + adjust_parameters_trailing_whitespace(config, &mut params, &self.close_paren_tok)?; + + let returns = self.returns.inflate(config)?; + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + + let body = self.body.inflate(config)?; + Ok(Self::Inflated { + name, + type_parameters, + params, + body, + decorators, + returns, + asynchronous, + leading_lines, + lines_after_decorators, + whitespace_after_def, + whitespace_after_name, + whitespace_after_type_parameters, + whitespace_before_params, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct Decorator<'a> { + pub decorator: Expression<'a>, + pub leading_lines: Vec>, + pub whitespace_after_at: 
SimpleWhitespace<'a>, + pub trailing_whitespace: TrailingWhitespace<'a>, + + pub(crate) at_tok: TokenRef<'a>, + pub(crate) newline_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Decorator<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in self.leading_lines.iter() { + ll.codegen(state); + } + state.add_indent(); + state.add_token("@"); + self.whitespace_after_at.codegen(state); + self.decorator.codegen(state); + self.trailing_whitespace.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedDecorator<'r, 'a> { + type Inflated = Decorator<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.at_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_after_at = + parse_simple_whitespace(config, &mut (*self.at_tok).whitespace_after.borrow_mut())?; + let decorator = self.decorator.inflate(config)?; + let trailing_whitespace = parse_trailing_whitespace( + config, + &mut (*self.newline_tok).whitespace_before.borrow_mut(), + )?; + Ok(Self::Inflated { + decorator, + leading_lines, + whitespace_after_at, + trailing_whitespace, + }) + } +} + +#[cst_node] +pub struct If<'a> { + /// The expression that, when evaluated, should give us a truthy value + pub test: Expression<'a>, + // The body of this compound statement. + pub body: Suite<'a>, + + /// An optional ``elif`` or ``else`` clause. ``If`` signifies an ``elif`` block. + pub orelse: Option>>, + + /// Sequence of empty lines appearing before this compound statement line. + pub leading_lines: Vec>, + + /// The whitespace appearing after the ``if`` keyword but before the test + /// expression. + pub whitespace_before_test: SimpleWhitespace<'a>, + + /// The whitespace appearing after the test expression but before the colon. + pub whitespace_after_test: SimpleWhitespace<'a>, + + /// Signifies if this instance represents an ``elif`` or an ``if`` block. 
+ #[cfg_attr(feature = "py", skip_py)] + pub is_elif: bool, + + pub(crate) if_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for If<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for l in &self.leading_lines { + l.codegen(state); + } + state.add_indent(); + + state.add_token(if self.is_elif { "elif" } else { "if" }); + self.whitespace_before_test.codegen(state); + self.test.codegen(state); + self.whitespace_after_test.codegen(state); + state.add_token(":"); + self.body.codegen(state); + if let Some(orelse) = &self.orelse { + orelse.codegen(state) + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedIf<'r, 'a> { + type Inflated = If<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.if_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_before_test = + parse_simple_whitespace(config, &mut (*self.if_tok).whitespace_after.borrow_mut())?; + let test = self.test.inflate(config)?; + let whitespace_after_test = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + let orelse = self.orelse.inflate(config)?; + + Ok(Self::Inflated { + test, + body, + orelse, + leading_lines, + whitespace_before_test, + whitespace_after_test, + is_elif: self.is_elif, + }) + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node(Inflate, Codegen)] +pub enum OrElse<'a> { + Elif(If<'a>), + Else(Else<'a>), +} + +#[cst_node] +pub struct Else<'a> { + pub body: Suite<'a>, + /// Sequence of empty lines appearing before this compound statement line. + pub leading_lines: Vec>, + /// The whitespace appearing after the ``else`` keyword but before the colon. 
+ pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) else_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Else<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for l in &self.leading_lines { + l.codegen(state); + } + state.add_indent(); + + state.add_token("else"); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedElse<'r, 'a> { + type Inflated = Else<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.else_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + + Ok(Self::Inflated { + body, + leading_lines, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct Annotation<'a> { + pub annotation: Expression<'a>, + pub whitespace_before_indicator: Option>, + pub whitespace_after_indicator: ParenthesizableWhitespace<'a>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'a> Annotation<'a> { + pub fn codegen(&self, state: &mut CodegenState<'a>, default_indicator: &'a str) { + if let Some(ws) = &self.whitespace_before_indicator { + ws.codegen(state); + } else if default_indicator == "->" { + state.add_token(" "); + } else { + panic!("Variable annotation but whitespace is None"); + } + + state.add_token(default_indicator); + self.whitespace_after_indicator.codegen(state); + self.annotation.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedAnnotation<'r, 'a> { + type Inflated = Annotation<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_before_indicator = Some(parse_parenthesizable_whitespace( + config, + &mut (*self.tok).whitespace_before.borrow_mut(), + )?); + let whitespace_after_indicator = parse_parenthesizable_whitespace( + 
config, + &mut (*self.tok).whitespace_after.borrow_mut(), + )?; + let annotation = self.annotation.inflate(config)?; + Ok(Self::Inflated { + annotation, + whitespace_before_indicator, + whitespace_after_indicator, + }) + } +} + +#[cst_node] +pub struct AnnAssign<'a> { + pub target: AssignTargetExpression<'a>, + pub annotation: Annotation<'a>, + pub value: Option>, + pub equal: Option>, + pub semicolon: Option>, +} + +impl<'a> Codegen<'a> for AnnAssign<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.target.codegen(state); + self.annotation.codegen(state, ":"); + if let Some(eq) = &self.equal { + eq.codegen(state); + } else if self.value.is_some() { + state.add_token(" = "); + } + if let Some(value) = &self.value { + value.codegen(state); + } + + if let Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedAnnAssign<'r, 'a> { + type Inflated = AnnAssign<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let target = self.target.inflate(config)?; + let annotation = self.annotation.inflate(config)?; + let value = self.value.inflate(config)?; + let equal = self.equal.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + target, + annotation, + value, + equal, + semicolon, + }) + } +} + +impl<'r, 'a> DeflatedAnnAssign<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct Return<'a> { + pub value: Option>, + pub whitespace_after_return: Option>, + pub semicolon: Option>, + + pub(crate) return_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Return<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("return"); + if let Some(ws) = &self.whitespace_after_return { + ws.codegen(state); + } else if self.value.is_some() { + state.add_token(" "); + } + + if let Some(val) = &self.value { + val.codegen(state); + } + if let Some(semi) = &self.semicolon { + 
semi.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedReturn<'r, 'a> { + type Inflated = Return<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_return = if self.value.is_some() { + Some(parse_simple_whitespace( + config, + &mut (*self.return_tok).whitespace_after.borrow_mut(), + )?) + } else { + // otherwise space is owned by semicolon or small statement + // whitespace is not None to preserve a quirk of the pure python parser + Some(Default::default()) + }; + let value = self.value.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + value, + whitespace_after_return, + semicolon, + }) + } +} + +impl<'r, 'a> DeflatedReturn<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct Assert<'a> { + pub test: Expression<'a>, + pub msg: Option>, + pub comma: Option>, + pub whitespace_after_assert: SimpleWhitespace<'a>, + pub semicolon: Option>, + + pub(crate) assert_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Assert<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("assert"); + self.whitespace_after_assert.codegen(state); + self.test.codegen(state); + if let Some(comma) = &self.comma { + comma.codegen(state); + } else if self.msg.is_some() { + state.add_token(", "); + } + if let Some(msg) = &self.msg { + msg.codegen(state); + } + if let Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} +impl<'r, 'a> Inflate<'a> for DeflatedAssert<'r, 'a> { + type Inflated = Assert<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_assert = parse_simple_whitespace( + config, + &mut (*self.assert_tok).whitespace_after.borrow_mut(), + )?; + + let test = self.test.inflate(config)?; + let comma = self.comma.inflate(config)?; + let msg = self.msg.inflate(config)?; + + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + test, + msg, + 
comma, + whitespace_after_assert, + semicolon, + }) + } +} + +impl<'r, 'a> DeflatedAssert<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct Raise<'a> { + pub exc: Option>, + pub cause: Option>, + pub whitespace_after_raise: Option>, + pub semicolon: Option>, + + pub(crate) raise_tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedRaise<'r, 'a> { + type Inflated = Raise<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_raise = if self.exc.is_some() { + Some(parse_simple_whitespace( + config, + &mut (*self.raise_tok).whitespace_after.borrow_mut(), + )?) + } else { + Default::default() + }; + + let exc = self.exc.inflate(config)?; + let mut cause = self.cause.inflate(config)?; + if exc.is_none() { + if let Some(cause) = cause.as_mut() { + // in `raise from`, `raise` owns the shared whitespace + cause.whitespace_before_from = None; + } + } + let semicolon = self.semicolon.inflate(config)?; + + Ok(Self::Inflated { + exc, + cause, + whitespace_after_raise, + semicolon, + }) + } +} + +impl<'a> Codegen<'a> for Raise<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("raise"); + if let Some(ws) = &self.whitespace_after_raise { + ws.codegen(state); + } else if self.exc.is_some() { + state.add_token(" "); + } + + if let Some(exc) = &self.exc { + exc.codegen(state); + } + + if let Some(cause) = &self.cause { + cause.codegen(state, " "); + } + + if let Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} + +impl<'r, 'a> DeflatedRaise<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct NameItem<'a> { + pub name: Name<'a>, + pub comma: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedNameItem<'r, 'a> { + type Inflated = NameItem<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let name = self.name.inflate(config)?; + let comma = 
self.comma.inflate(config)?; + Ok(Self::Inflated { name, comma }) + } +} + +impl<'a> NameItem<'a> { + fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { + self.name.codegen(state); + if let Some(comma) = &self.comma { + comma.codegen(state); + } else if default_comma { + state.add_token(", "); + } + } +} + +#[cst_node] +pub struct Global<'a> { + pub names: Vec>, + pub whitespace_after_global: SimpleWhitespace<'a>, + pub semicolon: Option>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedGlobal<'r, 'a> { + type Inflated = Global<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_global = + parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?; + let names = self.names.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + names, + whitespace_after_global, + semicolon, + }) + } +} + +impl<'a> Codegen<'a> for Global<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("global"); + self.whitespace_after_global.codegen(state); + let len = self.names.len(); + for (i, name) in self.names.iter().enumerate() { + name.codegen(state, i + 1 != len); + } + + if let Some(semicolon) = &self.semicolon { + semicolon.codegen(state); + } + } +} + +impl<'r, 'a> DeflatedGlobal<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct Nonlocal<'a> { + pub names: Vec>, + pub whitespace_after_nonlocal: SimpleWhitespace<'a>, + pub semicolon: Option>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedNonlocal<'r, 'a> { + type Inflated = Nonlocal<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_nonlocal = + parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?; + let names = self.names.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + 
Ok(Self::Inflated { + names, + whitespace_after_nonlocal, + semicolon, + }) + } +} + +impl<'a> Codegen<'a> for Nonlocal<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("nonlocal"); + self.whitespace_after_nonlocal.codegen(state); + let len = self.names.len(); + for (i, name) in self.names.iter().enumerate() { + name.codegen(state, i + 1 != len); + } + + if let Some(semicolon) = &self.semicolon { + semicolon.codegen(state); + } + } +} + +impl<'r, 'a> DeflatedNonlocal<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct For<'a> { + pub target: AssignTargetExpression<'a>, + pub iter: Expression<'a>, + pub body: Suite<'a>, + pub orelse: Option>, + pub asynchronous: Option>, + + pub leading_lines: Vec>, + pub whitespace_after_for: SimpleWhitespace<'a>, + pub whitespace_before_in: SimpleWhitespace<'a>, + pub whitespace_after_in: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) async_tok: Option>, + pub(crate) for_tok: TokenRef<'a>, + pub(crate) in_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for For<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + + if let Some(asy) = &self.asynchronous { + asy.codegen(state); + } + state.add_token("for"); + self.whitespace_after_for.codegen(state); + self.target.codegen(state); + self.whitespace_before_in.codegen(state); + state.add_token("in"); + self.whitespace_after_in.codegen(state); + self.iter.codegen(state); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + if let Some(e) = &self.orelse { + e.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedFor<'r, 'a> { + type Inflated = For<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let (asynchronous, leading_lines) = if let 
Some(asy) = self.async_tok.as_mut() { + let whitespace_after = + parse_parenthesizable_whitespace(config, &mut asy.whitespace_after.borrow_mut())?; + ( + Some(Asynchronous { whitespace_after }), + Some(parse_empty_lines( + config, + &mut asy.whitespace_before.borrow_mut(), + None, + )?), + ) + } else { + (None, None) + }; + let leading_lines = if let Some(ll) = leading_lines { + ll + } else { + parse_empty_lines( + config, + &mut (*self.for_tok).whitespace_before.borrow_mut(), + None, + )? + }; + let whitespace_after_for = + parse_simple_whitespace(config, &mut (*self.for_tok).whitespace_after.borrow_mut())?; + let target = self.target.inflate(config)?; + let whitespace_before_in = + parse_simple_whitespace(config, &mut (*self.in_tok).whitespace_before.borrow_mut())?; + let whitespace_after_in = + parse_simple_whitespace(config, &mut (*self.in_tok).whitespace_after.borrow_mut())?; + let iter = self.iter.inflate(config)?; + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + + let body = self.body.inflate(config)?; + let orelse = self.orelse.inflate(config)?; + + Ok(Self::Inflated { + target, + iter, + body, + orelse, + asynchronous, + leading_lines, + whitespace_after_for, + whitespace_before_in, + whitespace_after_in, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct While<'a> { + pub test: Expression<'a>, + pub body: Suite<'a>, + pub orelse: Option>, + pub leading_lines: Vec>, + pub whitespace_after_while: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) while_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for While<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + + state.add_token("while"); + self.whitespace_after_while.codegen(state); + self.test.codegen(state); + 
self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + if let Some(orelse) = &self.orelse { + orelse.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedWhile<'r, 'a> { + type Inflated = While<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.while_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_after_while = + parse_simple_whitespace(config, &mut (*self.while_tok).whitespace_after.borrow_mut())?; + let test = self.test.inflate(config)?; + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + let orelse = self.orelse.inflate(config)?; + + Ok(Self::Inflated { + test, + body, + orelse, + leading_lines, + whitespace_after_while, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct ClassDef<'a> { + pub name: Name<'a>, + pub type_parameters: Option>, + pub body: Suite<'a>, + pub bases: Vec>, + pub keywords: Vec>, + pub decorators: Vec>, + pub lpar: Option>, + pub rpar: Option>, + pub leading_lines: Vec>, + pub lines_after_decorators: Vec>, + pub whitespace_after_class: SimpleWhitespace<'a>, + pub whitespace_after_name: SimpleWhitespace<'a>, + pub whitespace_after_type_parameters: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) class_tok: TokenRef<'a>, + pub(crate) lpar_tok: Option>, + pub(crate) rpar_tok: Option>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for ClassDef<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + for dec in &self.decorators { + dec.codegen(state); + } + for lad in &self.lines_after_decorators { + lad.codegen(state); + } + state.add_indent(); + + state.add_token("class"); + self.whitespace_after_class.codegen(state); + 
self.name.codegen(state); + self.whitespace_after_name.codegen(state); + + if let Some(tp) = &self.type_parameters { + tp.codegen(state); + self.whitespace_after_type_parameters.codegen(state); + } + + let need_parens = !self.bases.is_empty() || !self.keywords.is_empty(); + + if let Some(lpar) = &self.lpar { + lpar.codegen(state); + } else if need_parens { + state.add_token("("); + } + let args = self.bases.iter().chain(self.keywords.iter()); + let len = self.bases.len() + self.keywords.len(); + for (i, arg) in args.enumerate() { + arg.codegen(state, i + 1 < len); + } + + if let Some(rpar) = &self.rpar { + rpar.codegen(state); + } else if need_parens { + state.add_token(")"); + } + + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedClassDef<'r, 'a> { + type Inflated = ClassDef<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let mut leading_lines = parse_empty_lines( + config, + &mut (*self.class_tok).whitespace_before.borrow_mut(), + None, + )?; + let mut decorators = self.decorators.inflate(config)?; + let mut lines_after_decorators = Default::default(); + if let Some(dec) = decorators.first_mut() { + swap(&mut lines_after_decorators, &mut leading_lines); + swap(&mut dec.leading_lines, &mut leading_lines); + } + + let whitespace_after_class = + parse_simple_whitespace(config, &mut (*self.class_tok).whitespace_after.borrow_mut())?; + let name = self.name.inflate(config)?; + + let (mut whitespace_after_name, mut type_parameters, mut whitespace_after_type_parameters) = + Default::default(); + + if let Some(tparams) = self.type_parameters { + let rbracket_tok = tparams.rbracket.tok.clone(); + whitespace_after_name = parse_simple_whitespace( + config, + &mut tparams.lbracket.tok.whitespace_before.borrow_mut(), + )?; + type_parameters = Some(tparams.inflate(config)?); + whitespace_after_type_parameters = + parse_simple_whitespace(config, &mut 
rbracket_tok.whitespace_after.borrow_mut())?; + } else if let Some(lpar_tok) = self.lpar_tok.as_mut() { + whitespace_after_name = + parse_simple_whitespace(config, &mut lpar_tok.whitespace_before.borrow_mut())?; + } + + let lpar = self.lpar.inflate(config)?; + let bases = self.bases.inflate(config)?; + let keywords = self.keywords.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + + Ok(Self::Inflated { + name, + type_parameters, + body, + bases, + keywords, + decorators, + lpar, + rpar, + leading_lines, + lines_after_decorators, + whitespace_after_class, + whitespace_after_type_parameters, + whitespace_after_name, + whitespace_before_colon, + }) + } +} + +impl<'r, 'a> DeflatedClassDef<'r, 'a> { + pub fn with_decorators(self, decorators: Vec>) -> Self { + Self { decorators, ..self } + } +} + +#[cst_node] +pub struct Finally<'a> { + pub body: Suite<'a>, + pub leading_lines: Vec>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) finally_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Finally<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + + state.add_token("finally"); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedFinally<'r, 'a> { + type Inflated = Finally<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.finally_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + Ok(Self::Inflated { + body, + 
leading_lines, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct ExceptHandler<'a> { + pub body: Suite<'a>, + pub r#type: Option>, + pub name: Option>, + pub leading_lines: Vec>, + pub whitespace_after_except: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) except_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for ExceptHandler<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + + state.add_token("except"); + self.whitespace_after_except.codegen(state); + if let Some(t) = &self.r#type { + t.codegen(state); + } + if let Some(n) = &self.name { + n.codegen(state); + } + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedExceptHandler<'r, 'a> { + type Inflated = ExceptHandler<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.except_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_after_except = parse_simple_whitespace( + config, + &mut (*self.except_tok).whitespace_after.borrow_mut(), + )?; + + let r#type = self.r#type.inflate(config)?; + let name = self.name.inflate(config)?; + let whitespace_before_colon = if name.is_some() { + parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )? 
+ } else { + Default::default() + }; + + let body = self.body.inflate(config)?; + Ok(Self::Inflated { + body, + r#type, + name, + leading_lines, + whitespace_after_except, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct ExceptStarHandler<'a> { + pub body: Suite<'a>, + pub r#type: Expression<'a>, + pub name: Option>, + pub leading_lines: Vec>, + pub whitespace_after_except: SimpleWhitespace<'a>, + pub whitespace_after_star: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) except_tok: TokenRef<'a>, + pub(crate) star_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for ExceptStarHandler<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + + state.add_token("except"); + self.whitespace_after_except.codegen(state); + state.add_token("*"); + self.whitespace_after_star.codegen(state); + self.r#type.codegen(state); + if let Some(n) = &self.name { + n.codegen(state); + } + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedExceptStarHandler<'r, 'a> { + type Inflated = ExceptStarHandler<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut self.except_tok.whitespace_before.borrow_mut(), + None, + )?; + let whitespace_after_except = + parse_simple_whitespace(config, &mut self.except_tok.whitespace_after.borrow_mut())?; + let whitespace_after_star = + parse_simple_whitespace(config, &mut self.star_tok.whitespace_after.borrow_mut())?; + + let r#type = self.r#type.inflate(config)?; + let name = self.name.inflate(config)?; + let whitespace_before_colon = if name.is_some() { + parse_simple_whitespace(config, &mut self.colon_tok.whitespace_before.borrow_mut())? 
+ } else { + Default::default() + }; + + let body = self.body.inflate(config)?; + Ok(Self::Inflated { + body, + r#type, + name, + leading_lines, + whitespace_after_except, + whitespace_after_star, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct Try<'a> { + pub body: Suite<'a>, + pub handlers: Vec>, + pub orelse: Option>, + pub finalbody: Option>, + pub leading_lines: Vec>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) try_tok: TokenRef<'a>, + // colon_tok unnecessary +} + +impl<'a> Codegen<'a> for Try<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + state.add_token("try"); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + for h in &self.handlers { + h.codegen(state); + } + if let Some(e) = &self.orelse { + e.codegen(state); + } + if let Some(f) = &self.finalbody { + f.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTry<'r, 'a> { + type Inflated = Try<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.try_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_before_colon = + parse_simple_whitespace(config, &mut (*self.try_tok).whitespace_after.borrow_mut())?; + let body = self.body.inflate(config)?; + let handlers = self.handlers.inflate(config)?; + let orelse = self.orelse.inflate(config)?; + let finalbody = self.finalbody.inflate(config)?; + Ok(Self::Inflated { + body, + handlers, + orelse, + finalbody, + leading_lines, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct TryStar<'a> { + pub body: Suite<'a>, + pub handlers: Vec>, + pub orelse: Option>, + pub finalbody: Option>, + pub leading_lines: Vec>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) try_tok: TokenRef<'a>, + // colon_tok unnecessary +} + +impl<'a> Codegen<'a> for TryStar<'a> { + fn 
codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + state.add_token("try"); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + for h in &self.handlers { + h.codegen(state); + } + if let Some(e) = &self.orelse { + e.codegen(state); + } + if let Some(f) = &self.finalbody { + f.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTryStar<'r, 'a> { + type Inflated = TryStar<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut (*self.try_tok).whitespace_before.borrow_mut(), + None, + )?; + let whitespace_before_colon = + parse_simple_whitespace(config, &mut (*self.try_tok).whitespace_after.borrow_mut())?; + let body = self.body.inflate(config)?; + let handlers = self.handlers.inflate(config)?; + let orelse = self.orelse.inflate(config)?; + let finalbody = self.finalbody.inflate(config)?; + Ok(Self::Inflated { + body, + handlers, + orelse, + finalbody, + leading_lines, + whitespace_before_colon, + }) + } +} + +#[cst_node] +pub struct AugAssign<'a> { + pub target: AssignTargetExpression<'a>, + pub operator: AugOp<'a>, + pub value: Expression<'a>, + pub semicolon: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedAugAssign<'r, 'a> { + type Inflated = AugAssign<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let target = self.target.inflate(config)?; + let operator = self.operator.inflate(config)?; + let value = self.value.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + target, + operator, + value, + semicolon, + }) + } +} + +impl<'a> Codegen<'a> for AugAssign<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.target.codegen(state); + self.operator.codegen(state); + self.value.codegen(state); + + if let Some(s) = &self.semicolon { + s.codegen(state); + } + } +} + +impl<'r, 'a> DeflatedAugAssign<'r, 'a> 
{ + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct WithItem<'a> { + pub item: Expression<'a>, + pub asname: Option>, + pub comma: Option>, +} + +impl<'r, 'a> DeflatedWithItem<'r, 'a> { + fn inflate_withitem(self, config: &Config<'a>, is_last: bool) -> Result> { + let item = self.item.inflate(config)?; + let asname = self.asname.inflate(config)?; + let comma = if is_last { + self.comma.map(|c| c.inflate_before(config)).transpose()? + } else { + self.comma.map(|c| c.inflate(config)).transpose()? + }; + Ok(WithItem { + item, + asname, + comma, + }) + } +} + +impl<'a> Codegen<'a> for WithItem<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.item.codegen(state); + if let Some(n) = &self.asname { + n.codegen(state); + } + if let Some(c) = &self.comma { + c.codegen(state); + } + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedWithItem<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(comma), + ..self + } + } +} + +#[cst_node] +pub struct With<'a> { + pub items: Vec>, + pub body: Suite<'a>, + pub asynchronous: Option>, + pub leading_lines: Vec>, + pub lpar: Option>, + pub rpar: Option>, + pub whitespace_after_with: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) async_tok: Option>, + pub(crate) with_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for With<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for ll in &self.leading_lines { + ll.codegen(state); + } + state.add_indent(); + + if let Some(asy) = &self.asynchronous { + asy.codegen(state); + } + state.add_token("with"); + self.whitespace_after_with.codegen(state); + + // TODO: Force parens whenever there are newlines in + // the commas of self.items. + // + // For now, only the python API does this. 
+ let need_parens = false; + if let Some(lpar) = &self.lpar { + lpar.codegen(state); + } else if need_parens { + state.add_token("("); + } + + let len = self.items.len(); + for (i, item) in self.items.iter().enumerate() { + item.codegen(state); + if item.comma.is_none() && i + 1 < len { + state.add_token(", "); + } + } + + if let Some(rpar) = &self.rpar { + rpar.codegen(state); + } else if need_parens { + state.add_token(")"); + } + + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedWith<'r, 'a> { + type Inflated = With<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let (asynchronous, leading_lines) = if let Some(asy) = self.async_tok.as_mut() { + let whitespace_after = + parse_parenthesizable_whitespace(config, &mut asy.whitespace_after.borrow_mut())?; + ( + Some(Asynchronous { whitespace_after }), + Some(parse_empty_lines( + config, + &mut asy.whitespace_before.borrow_mut(), + None, + )?), + ) + } else { + (None, None) + }; + + let leading_lines = if let Some(ll) = leading_lines { + ll + } else { + parse_empty_lines( + config, + &mut (*self.with_tok).whitespace_before.borrow_mut(), + None, + )? + }; + + let whitespace_after_with = + parse_simple_whitespace(config, &mut (*self.with_tok).whitespace_after.borrow_mut())?; + let lpar = self.lpar.map(|lpar| lpar.inflate(config)).transpose()?; + let len = self.items.len(); + let items = self + .items + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_withitem(config, idx + 1 == len)) + .collect::>>()?; + let rpar = if !items.is_empty() { + // rpar only has whitespace if items is non empty + self.rpar.map(|rpar| rpar.inflate(config)).transpose()? 
+ } else { + Default::default() + }; + let whitespace_before_colon = parse_simple_whitespace( + config, + &mut (*self.colon_tok).whitespace_before.borrow_mut(), + )?; + let body = self.body.inflate(config)?; + + Ok(Self::Inflated { + items, + body, + asynchronous, + leading_lines, + lpar, + rpar, + whitespace_after_with, + whitespace_before_colon, + }) + } +} + +#[cst_node(Codegen, ParenthesizedNode, Inflate)] +pub enum DelTargetExpression<'a> { + Name(Box>), + Attribute(Box>), + Tuple(Box>), + List(Box>), + Subscript(Box>), +} + +impl<'r, 'a> std::convert::From> + for DeflatedExpression<'r, 'a> +{ + fn from(d: DeflatedDelTargetExpression<'r, 'a>) -> Self { + match d { + DeflatedDelTargetExpression::Attribute(a) => Self::Attribute(a), + DeflatedDelTargetExpression::List(l) => Self::List(l), + DeflatedDelTargetExpression::Name(n) => Self::Name(n), + DeflatedDelTargetExpression::Subscript(s) => Self::Subscript(s), + DeflatedDelTargetExpression::Tuple(t) => Self::Tuple(t), + } + } +} +impl<'r, 'a> std::convert::From> for DeflatedElement<'r, 'a> { + fn from(d: DeflatedDelTargetExpression<'r, 'a>) -> Self { + Self::Simple { + value: d.into(), + comma: None, + } + } +} + +#[cst_node] +pub struct Del<'a> { + pub target: DelTargetExpression<'a>, + pub whitespace_after_del: SimpleWhitespace<'a>, + pub semicolon: Option>, + + pub(crate) tok: TokenRef<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedDel<'r, 'a> { + type Inflated = Del<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_del = + parse_simple_whitespace(config, &mut (*self.tok).whitespace_after.borrow_mut())?; + let target = self.target.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + target, + whitespace_after_del, + semicolon, + }) + } +} + +impl<'a> Codegen<'a> for Del<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("del"); + self.whitespace_after_del.codegen(state); + self.target.codegen(state); + if let 
Some(semi) = &self.semicolon { + semi.codegen(state); + } + } +} + +impl<'r, 'a> DeflatedDel<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} + +#[cst_node] +pub struct Match<'a> { + pub subject: Expression<'a>, + pub cases: Vec>, + + pub leading_lines: Vec>, + pub whitespace_after_match: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + pub whitespace_after_colon: TrailingWhitespace<'a>, + pub indent: Option<&'a str>, + pub footer: Vec>, + + pub(crate) match_tok: TokenRef<'a>, + pub(crate) colon_tok: TokenRef<'a>, + pub(crate) indent_tok: TokenRef<'a>, + pub(crate) dedent_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for Match<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for l in &self.leading_lines { + l.codegen(state); + } + state.add_indent(); + state.add_token("match"); + self.whitespace_after_match.codegen(state); + self.subject.codegen(state); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.whitespace_after_colon.codegen(state); + + let indent = self.indent.unwrap_or(state.default_indent); + state.indent(indent); + + // Note: empty cases is a syntax error + for c in &self.cases { + c.codegen(state); + } + + for f in &self.footer { + f.codegen(state); + } + state.dedent(); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatch<'r, 'a> { + type Inflated = Match<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut self.match_tok.whitespace_before.borrow_mut(), + None, + )?; + let whitespace_after_match = + parse_simple_whitespace(config, &mut self.match_tok.whitespace_after.borrow_mut())?; + let subject = self.subject.inflate(config)?; + let whitespace_before_colon = + parse_simple_whitespace(config, &mut self.colon_tok.whitespace_before.borrow_mut())?; + let whitespace_after_colon = + parse_trailing_whitespace(config, &mut self.colon_tok.whitespace_after.borrow_mut())?; + 
let mut indent = self.indent_tok.relative_indent; + if indent == Some(config.default_indent) { + indent = None; + } + let cases = self.cases.inflate(config)?; + // See note about footers in `IndentedBlock`'s inflate fn + let footer = parse_empty_lines( + config, + &mut self.dedent_tok.whitespace_after.borrow_mut(), + Some(self.indent_tok.whitespace_before.borrow().absolute_indent), + )?; + Ok(Self::Inflated { + subject, + cases, + leading_lines, + whitespace_after_match, + whitespace_before_colon, + whitespace_after_colon, + indent, + footer, + }) + } +} + +#[cst_node] +pub struct MatchCase<'a> { + pub pattern: MatchPattern<'a>, + pub guard: Option>, + pub body: Suite<'a>, + + pub leading_lines: Vec>, + pub whitespace_after_case: SimpleWhitespace<'a>, + pub whitespace_before_if: SimpleWhitespace<'a>, + pub whitespace_after_if: SimpleWhitespace<'a>, + pub whitespace_before_colon: SimpleWhitespace<'a>, + + pub(crate) case_tok: TokenRef<'a>, + pub(crate) if_tok: Option>, + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for MatchCase<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + for l in &self.leading_lines { + l.codegen(state); + } + state.add_indent(); + state.add_token("case"); + self.whitespace_after_case.codegen(state); + self.pattern.codegen(state); + if let Some(guard) = &self.guard { + self.whitespace_before_if.codegen(state); + state.add_token("if"); + self.whitespace_after_if.codegen(state); + guard.codegen(state); + } + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.body.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchCase<'r, 'a> { + type Inflated = MatchCase<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let leading_lines = parse_empty_lines( + config, + &mut self.case_tok.whitespace_before.borrow_mut(), + None, + )?; + let whitespace_after_case = + parse_simple_whitespace(config, &mut self.case_tok.whitespace_after.borrow_mut())?; + let pattern = 
self.pattern.inflate(config)?; + let (whitespace_before_if, whitespace_after_if, guard) = + if let Some(if_tok) = self.if_tok.as_mut() { + ( + parse_simple_whitespace(config, &mut if_tok.whitespace_before.borrow_mut())?, + parse_simple_whitespace(config, &mut if_tok.whitespace_after.borrow_mut())?, + self.guard.inflate(config)?, + ) + } else { + Default::default() + }; + let whitespace_before_colon = + parse_simple_whitespace(config, &mut self.colon_tok.whitespace_before.borrow_mut())?; + let body = self.body.inflate(config)?; + Ok(Self::Inflated { + pattern, + guard, + body, + leading_lines, + whitespace_after_case, + whitespace_before_if, + whitespace_after_if, + whitespace_before_colon, + }) + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node(Codegen, Inflate, ParenthesizedNode)] +pub enum MatchPattern<'a> { + Value(MatchValue<'a>), + Singleton(MatchSingleton<'a>), + Sequence(MatchSequence<'a>), + Mapping(MatchMapping<'a>), + Class(MatchClass<'a>), + As(Box>), + Or(Box>), +} + +#[cst_node] +pub struct MatchValue<'a> { + pub value: Expression<'a>, +} + +impl<'a> ParenthesizedNode<'a> for MatchValue<'a> { + fn lpar(&self) -> &Vec> { + self.value.lpar() + } + fn rpar(&self) -> &Vec> { + self.value.rpar() + } + fn parenthesize(&self, state: &mut CodegenState<'a>, f: F) + where + F: FnOnce(&mut CodegenState<'a>), + { + self.value.parenthesize(state, f) + } + fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self { + Self { + value: self.value.with_parens(left, right), + } + } +} + +impl<'a> Codegen<'a> for MatchValue<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.value.codegen(state) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchValue<'r, 'a> { + type Inflated = MatchValue<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let value = self.value.inflate(config)?; + Ok(Self::Inflated { value }) + } +} + +impl<'r, 'a> ParenthesizedDeflatedNode<'r, 'a> for DeflatedMatchValue<'r, 'a> { + fn lpar(&self) -> &Vec> { + 
self.value.lpar() + } + fn rpar(&self) -> &Vec> { + self.value.rpar() + } + fn with_parens( + self, + left: DeflatedLeftParen<'r, 'a>, + right: DeflatedRightParen<'r, 'a>, + ) -> Self { + Self { + value: self.value.with_parens(left, right), + } + } +} + +#[cst_node] +pub struct MatchSingleton<'a> { + pub value: Name<'a>, +} + +impl<'a> ParenthesizedNode<'a> for MatchSingleton<'a> { + fn lpar(&self) -> &Vec> { + self.value.lpar() + } + fn rpar(&self) -> &Vec> { + self.value.rpar() + } + fn parenthesize(&self, state: &mut CodegenState<'a>, f: F) + where + F: FnOnce(&mut CodegenState<'a>), + { + self.value.parenthesize(state, f) + } + fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self { + Self { + value: self.value.with_parens(left, right), + } + } +} + +impl<'a> Codegen<'a> for MatchSingleton<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.value.codegen(state) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchSingleton<'r, 'a> { + type Inflated = MatchSingleton<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let value = self.value.inflate(config)?; + Ok(Self::Inflated { value }) + } +} + +impl<'r, 'a> ParenthesizedDeflatedNode<'r, 'a> for DeflatedMatchSingleton<'r, 'a> { + fn lpar(&self) -> &Vec> { + self.value.lpar() + } + fn rpar(&self) -> &Vec> { + self.value.rpar() + } + fn with_parens( + self, + left: DeflatedLeftParen<'r, 'a>, + right: DeflatedRightParen<'r, 'a>, + ) -> Self { + Self { + value: self.value.with_parens(left, right), + } + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node(Codegen, Inflate, ParenthesizedNode)] +pub enum MatchSequence<'a> { + MatchList(MatchList<'a>), + MatchTuple(MatchTuple<'a>), +} + +#[cst_node(ParenthesizedNode)] +pub struct MatchList<'a> { + pub patterns: Vec>, + pub lbracket: Option>, + pub rbracket: Option>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for MatchList<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, 
|state| { + self.lbracket.codegen(state); + let len = self.patterns.len(); + if len == 1 { + self.patterns.first().unwrap().codegen(state, false, false); + } else { + for (idx, pat) in self.patterns.iter().enumerate() { + pat.codegen(state, idx < len - 1, true); + } + } + self.rbracket.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchList<'r, 'a> { + type Inflated = MatchList<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbracket = self.lbracket.inflate(config)?; + + let len = self.patterns.len(); + let patterns = self + .patterns + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) + .collect::>>()?; + + let rbracket = self.rbracket.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + patterns, + lbracket, + rbracket, + lpar, + rpar, + }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct MatchTuple<'a> { + pub patterns: Vec>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for MatchTuple<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + let len = self.patterns.len(); + if len == 1 { + self.patterns.first().unwrap().codegen(state, true, false); + } else { + for (idx, pat) in self.patterns.iter().enumerate() { + pat.codegen(state, idx < len - 1, true); + } + } + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchTuple<'r, 'a> { + type Inflated = MatchTuple<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let len = self.patterns.len(); + let patterns = self + .patterns + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, idx + 1 == len)) + .collect::>>()?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + patterns, + lpar, + rpar, + }) + } +} + +#[allow(clippy::large_enum_variant)] +#[cst_node] +pub enum StarrableMatchSequenceElement<'a> { + 
Simple(MatchSequenceElement<'a>), + Starred(MatchStar<'a>), +} + +impl<'a> StarrableMatchSequenceElement<'a> { + fn codegen( + &self, + state: &mut CodegenState<'a>, + default_comma: bool, + default_comma_whitespace: bool, + ) { + match &self { + Self::Simple(s) => s.codegen(state, default_comma, default_comma_whitespace), + Self::Starred(s) => s.codegen(state, default_comma, default_comma_whitespace), + } + } +} +impl<'r, 'a> DeflatedStarrableMatchSequenceElement<'r, 'a> { + fn inflate_element( + self, + config: &Config<'a>, + last_element: bool, + ) -> Result> { + Ok(match self { + Self::Simple(s) => { + StarrableMatchSequenceElement::Simple(s.inflate_element(config, last_element)?) + } + Self::Starred(s) => { + StarrableMatchSequenceElement::Starred(s.inflate_element(config, last_element)?) + } + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedStarrableMatchSequenceElement<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + match self { + Self::Simple(s) => Self::Simple(s.with_comma(comma)), + Self::Starred(s) => Self::Starred(s.with_comma(comma)), + } + } +} + +#[cst_node] +pub struct MatchSequenceElement<'a> { + pub value: MatchPattern<'a>, + pub comma: Option>, +} + +impl<'a> MatchSequenceElement<'a> { + fn codegen( + &self, + state: &mut CodegenState<'a>, + default_comma: bool, + default_comma_whitespace: bool, + ) { + self.value.codegen(state); + self.comma.codegen(state); + if self.comma.is_none() && default_comma { + state.add_token(if default_comma_whitespace { ", " } else { "," }); + } + } +} +impl<'r, 'a> DeflatedMatchSequenceElement<'r, 'a> { + fn inflate_element( + self, + config: &Config<'a>, + last_element: bool, + ) -> Result> { + let value = self.value.inflate(config)?; + let comma = if last_element { + self.comma.map(|c| c.inflate_before(config)).transpose() + } else { + self.comma.inflate(config) + }?; + Ok(MatchSequenceElement { value, comma }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for 
DeflatedMatchSequenceElement<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(comma), + ..self + } + } +} + +#[cst_node] +pub struct MatchStar<'a> { + pub name: Option>, + pub comma: Option>, + pub whitespace_before_name: ParenthesizableWhitespace<'a>, + + pub(crate) star_tok: TokenRef<'a>, +} + +impl<'a> MatchStar<'a> { + fn codegen( + &self, + state: &mut CodegenState<'a>, + default_comma: bool, + default_comma_whitespace: bool, + ) { + state.add_token("*"); + self.whitespace_before_name.codegen(state); + if let Some(name) = &self.name { + name.codegen(state); + } else { + state.add_token("_"); + } + self.comma.codegen(state); + if self.comma.is_none() && default_comma { + state.add_token(if default_comma_whitespace { ", " } else { "," }); + } + } +} +impl<'r, 'a> DeflatedMatchStar<'r, 'a> { + fn inflate_element(self, config: &Config<'a>, last_element: bool) -> Result> { + let whitespace_before_name = parse_parenthesizable_whitespace( + config, + &mut self.star_tok.whitespace_after.borrow_mut(), + )?; + let name = self.name.inflate(config)?; + let comma = if last_element { + self.comma.map(|c| c.inflate_before(config)).transpose() + } else { + self.comma.inflate(config) + }?; + Ok(MatchStar { + name, + comma, + whitespace_before_name, + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchStar<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(comma), + ..self + } + } +} + +#[cst_node(ParenthesizedNode)] +pub struct MatchMapping<'a> { + pub elements: Vec>, + pub rest: Option>, + pub trailing_comma: Option>, + pub lbrace: LeftCurlyBrace<'a>, + pub rbrace: RightCurlyBrace<'a>, + pub lpar: Vec>, + pub rpar: Vec>, + + pub whitespace_before_rest: SimpleWhitespace<'a>, + + pub(crate) star_tok: Option>, +} + +impl<'a> Codegen<'a> for MatchMapping<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.lbrace.codegen(state); + 
let len = self.elements.len(); + for (idx, el) in self.elements.iter().enumerate() { + el.codegen(state, self.rest.is_some() || idx < len - 1); + } + if let Some(rest) = &self.rest { + state.add_token("**"); + self.whitespace_before_rest.codegen(state); + rest.codegen(state); + self.trailing_comma.codegen(state); + } + self.rbrace.codegen(state); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchMapping<'r, 'a> { + type Inflated = MatchMapping<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let lbrace = self.lbrace.inflate(config)?; + + let len = self.elements.len(); + let no_star = self.star_tok.is_none(); + let elements = self + .elements + .into_iter() + .enumerate() + .map(|(idx, el)| el.inflate_element(config, no_star && idx + 1 == len)) + .collect::>>()?; + + let (whitespace_before_rest, rest, trailing_comma) = + if let Some(star_tok) = self.star_tok.as_mut() { + ( + parse_simple_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())?, + self.rest.inflate(config)?, + self.trailing_comma + .map(|c| c.inflate_before(config)) + .transpose()?, + ) + } else { + Default::default() + }; + + let rbrace = self.rbrace.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + elements, + rest, + trailing_comma, + lbrace, + rbrace, + lpar, + rpar, + whitespace_before_rest, + }) + } +} + +#[cst_node] +pub struct MatchMappingElement<'a> { + pub key: Expression<'a>, + pub pattern: MatchPattern<'a>, + pub comma: Option>, + + pub whitespace_before_colon: ParenthesizableWhitespace<'a>, + pub whitespace_after_colon: ParenthesizableWhitespace<'a>, + + pub(crate) colon_tok: TokenRef<'a>, +} + +impl<'a> MatchMappingElement<'a> { + fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { + self.key.codegen(state); + self.whitespace_before_colon.codegen(state); + state.add_token(":"); + self.whitespace_after_colon.codegen(state); + self.pattern.codegen(state); + 
self.comma.codegen(state); + if self.comma.is_none() && default_comma { + state.add_token(", "); + } + } +} +impl<'r, 'a> DeflatedMatchMappingElement<'r, 'a> { + fn inflate_element( + self, + config: &Config<'a>, + last_element: bool, + ) -> Result> { + let key = self.key.inflate(config)?; + let whitespace_before_colon = parse_parenthesizable_whitespace( + config, + &mut self.colon_tok.whitespace_before.borrow_mut(), + )?; + let whitespace_after_colon = parse_parenthesizable_whitespace( + config, + &mut self.colon_tok.whitespace_after.borrow_mut(), + )?; + let pattern = self.pattern.inflate(config)?; + let comma = if last_element { + self.comma.map(|c| c.inflate_before(config)).transpose() + } else { + self.comma.inflate(config) + }?; + Ok(MatchMappingElement { + key, + pattern, + comma, + whitespace_before_colon, + whitespace_after_colon, + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchMappingElement<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(comma), + ..self + } + } +} + +#[cst_node(ParenthesizedNode)] +pub struct MatchClass<'a> { + pub cls: NameOrAttribute<'a>, + pub patterns: Vec>, + pub kwds: Vec>, + pub lpar: Vec>, + pub rpar: Vec>, + + pub whitespace_after_cls: ParenthesizableWhitespace<'a>, + pub whitespace_before_patterns: ParenthesizableWhitespace<'a>, + pub whitespace_after_kwds: ParenthesizableWhitespace<'a>, + + pub(crate) lpar_tok: TokenRef<'a>, + pub(crate) rpar_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for MatchClass<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + self.cls.codegen(state); + self.whitespace_after_cls.codegen(state); + state.add_token("("); + self.whitespace_before_patterns.codegen(state); + let patlen = self.patterns.len(); + let kwdlen = self.kwds.len(); + for (idx, pat) in self.patterns.iter().enumerate() { + pat.codegen(state, idx < patlen - 1 + kwdlen, patlen == 1 && kwdlen == 0); + } + for (idx, kwd) in 
self.kwds.iter().enumerate() { + kwd.codegen(state, idx < kwdlen - 1); + } + self.whitespace_after_kwds.codegen(state); + state.add_token(")"); + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchClass<'r, 'a> { + type Inflated = MatchClass<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + + let cls = self.cls.inflate(config)?; + let whitespace_after_cls = parse_parenthesizable_whitespace( + config, + &mut self.lpar_tok.whitespace_before.borrow_mut(), + )?; + let whitespace_before_patterns = parse_parenthesizable_whitespace( + config, + &mut self.lpar_tok.whitespace_after.borrow_mut(), + )?; + + let patlen = self.patterns.len(); + let kwdlen = self.kwds.len(); + let patterns = self + .patterns + .into_iter() + .enumerate() + .map(|(idx, pat)| pat.inflate_element(config, idx + 1 == patlen + kwdlen)) + .collect::>()?; + let kwds = self + .kwds + .into_iter() + .enumerate() + .map(|(idx, kwd)| kwd.inflate_element(config, idx + 1 == kwdlen)) + .collect::>()?; + + let whitespace_after_kwds = parse_parenthesizable_whitespace( + config, + &mut self.rpar_tok.whitespace_before.borrow_mut(), + )?; + + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + cls, + patterns, + kwds, + lpar, + rpar, + whitespace_after_cls, + whitespace_before_patterns, + whitespace_after_kwds, + }) + } +} + +#[cst_node] +pub struct MatchKeywordElement<'a> { + pub key: Name<'a>, + pub pattern: MatchPattern<'a>, + pub comma: Option>, + + pub whitespace_before_equal: ParenthesizableWhitespace<'a>, + pub whitespace_after_equal: ParenthesizableWhitespace<'a>, + + pub(crate) equal_tok: TokenRef<'a>, +} + +impl<'a> MatchKeywordElement<'a> { + fn codegen(&self, state: &mut CodegenState<'a>, default_comma: bool) { + self.key.codegen(state); + self.whitespace_before_equal.codegen(state); + state.add_token("="); + self.whitespace_after_equal.codegen(state); + self.pattern.codegen(state); + self.comma.codegen(state); + if self.comma.is_none() && 
default_comma { + state.add_token(", "); + } + } +} +impl<'r, 'a> DeflatedMatchKeywordElement<'r, 'a> { + fn inflate_element( + self, + config: &Config<'a>, + last_element: bool, + ) -> Result> { + let key = self.key.inflate(config)?; + let whitespace_before_equal = parse_parenthesizable_whitespace( + config, + &mut self.equal_tok.whitespace_before.borrow_mut(), + )?; + let whitespace_after_equal = parse_parenthesizable_whitespace( + config, + &mut self.equal_tok.whitespace_after.borrow_mut(), + )?; + let pattern = self.pattern.inflate(config)?; + let comma = if last_element { + self.comma.map(|c| c.inflate_before(config)).transpose() + } else { + self.comma.inflate(config) + }?; + Ok(MatchKeywordElement { + key, + pattern, + comma, + whitespace_before_equal, + whitespace_after_equal, + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedMatchKeywordElement<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(comma), + ..self + } + } +} + +#[cst_node(ParenthesizedNode)] +pub struct MatchAs<'a> { + pub pattern: Option>, + pub name: Option>, + pub lpar: Vec>, + pub rpar: Vec>, + + pub whitespace_before_as: Option>, + pub whitespace_after_as: Option>, + + pub(crate) as_tok: Option>, +} + +impl<'a> Codegen<'a> for MatchAs<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + if let Some(pat) = &self.pattern { + pat.codegen(state); + self.whitespace_before_as.codegen(state); + state.add_token("as"); + self.whitespace_after_as.codegen(state); + } + if let Some(name) = &self.name { + name.codegen(state); + } else { + state.add_token("_"); + } + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchAs<'r, 'a> { + type Inflated = MatchAs<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let pattern = self.pattern.inflate(config)?; + let (whitespace_before_as, whitespace_after_as) = if let Some(as_tok) = self.as_tok.as_mut() + { + ( 
+ Some(parse_parenthesizable_whitespace( + config, + &mut as_tok.whitespace_before.borrow_mut(), + )?), + Some(parse_parenthesizable_whitespace( + config, + &mut as_tok.whitespace_after.borrow_mut(), + )?), + ) + } else { + Default::default() + }; + let name = self.name.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + pattern, + name, + lpar, + rpar, + whitespace_before_as, + whitespace_after_as, + }) + } +} + +#[cst_node] +pub struct MatchOrElement<'a> { + pub pattern: MatchPattern<'a>, + pub separator: Option>, +} + +impl<'a> MatchOrElement<'a> { + fn codegen(&self, state: &mut CodegenState<'a>, default_separator: bool) { + self.pattern.codegen(state); + self.separator.codegen(state); + if self.separator.is_none() && default_separator { + state.add_token(" | "); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchOrElement<'r, 'a> { + type Inflated = MatchOrElement<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let pattern = self.pattern.inflate(config)?; + let separator = self.separator.inflate(config)?; + Ok(Self::Inflated { pattern, separator }) + } +} + +#[cst_node(ParenthesizedNode)] +pub struct MatchOr<'a> { + pub patterns: Vec>, + pub lpar: Vec>, + pub rpar: Vec>, +} + +impl<'a> Codegen<'a> for MatchOr<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.parenthesize(state, |state| { + let len = self.patterns.len(); + for (idx, pat) in self.patterns.iter().enumerate() { + pat.codegen(state, idx + 1 < len) + } + }) + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedMatchOr<'r, 'a> { + type Inflated = MatchOr<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lpar = self.lpar.inflate(config)?; + let patterns = self.patterns.inflate(config)?; + let rpar = self.rpar.inflate(config)?; + Ok(Self::Inflated { + patterns, + lpar, + rpar, + }) + } +} + +#[cst_node] +pub struct TypeVar<'a> { + pub name: Name<'a>, + pub bound: Option>>, + pub colon: Option>, +} + +impl<'a> Codegen<'a> for 
TypeVar<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.name.codegen(state); + self.colon.codegen(state); + if let Some(bound) = &self.bound { + bound.codegen(state); + } + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTypeVar<'r, 'a> { + type Inflated = TypeVar<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let name = self.name.inflate(config)?; + let colon = self.colon.inflate(config)?; + let bound = self.bound.inflate(config)?; + Ok(Self::Inflated { name, bound, colon }) + } +} + +#[cst_node] +pub struct TypeVarTuple<'a> { + pub name: Name<'a>, + + pub whitespace_after_star: SimpleWhitespace<'a>, + + pub(crate) star_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for TypeVarTuple<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("*"); + self.whitespace_after_star.codegen(state); + self.name.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTypeVarTuple<'r, 'a> { + type Inflated = TypeVarTuple<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_star = + parse_simple_whitespace(config, &mut self.star_tok.whitespace_after.borrow_mut())?; + let name = self.name.inflate(config)?; + Ok(Self::Inflated { + name, + whitespace_after_star, + }) + } +} + +#[cst_node] +pub struct ParamSpec<'a> { + pub name: Name<'a>, + + pub whitespace_after_star: SimpleWhitespace<'a>, + + pub(crate) star_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for ParamSpec<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("**"); + self.whitespace_after_star.codegen(state); + self.name.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedParamSpec<'r, 'a> { + type Inflated = ParamSpec<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_star = + parse_simple_whitespace(config, &mut self.star_tok.whitespace_after.borrow_mut())?; + let name = self.name.inflate(config)?; + Ok(Self::Inflated { + name, + whitespace_after_star, + }) + } +} + 
+#[cst_node(Inflate, Codegen)] +pub enum TypeVarLike<'a> { + TypeVar(TypeVar<'a>), + TypeVarTuple(TypeVarTuple<'a>), + ParamSpec(ParamSpec<'a>), +} + +#[cst_node] +pub struct TypeParam<'a> { + pub param: TypeVarLike<'a>, + pub comma: Option>, + pub equal: Option>, + pub star: &'a str, + pub whitespace_after_star: SimpleWhitespace<'a>, + pub default: Option>, + pub star_tok: Option>, +} + +impl<'a> Codegen<'a> for TypeParam<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.param.codegen(state); + self.equal.codegen(state); + state.add_token(self.star); + self.whitespace_after_star.codegen(state); + self.default.codegen(state); + self.comma.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTypeParam<'r, 'a> { + type Inflated = TypeParam<'a>; + fn inflate(mut self, config: &Config<'a>) -> Result { + let whitespace_after_star = if let Some(star_tok) = self.star_tok.as_mut() { + parse_simple_whitespace(config, &mut star_tok.whitespace_after.borrow_mut())? + } else { + Default::default() + }; + let param = self.param.inflate(config)?; + let equal = self.equal.inflate(config)?; + let default = self.default.inflate(config)?; + let comma = self.comma.inflate(config)?; + Ok(Self::Inflated { + param, + comma, + equal, + star: self.star, + whitespace_after_star, + default, + }) + } +} + +impl<'r, 'a> WithComma<'r, 'a> for DeflatedTypeParam<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self { + Self { + comma: Some(comma), + ..self + } + } +} + +#[cst_node] +pub struct TypeParameters<'a> { + pub params: Vec>, + + pub lbracket: LeftSquareBracket<'a>, + pub rbracket: RightSquareBracket<'a>, +} + +impl<'a> Codegen<'a> for TypeParameters<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.lbracket.codegen(state); + let params_len = self.params.len(); + for (idx, param) in self.params.iter().enumerate() { + param.codegen(state); + if idx + 1 < params_len && param.comma.is_none() { + state.add_token(", "); + } + } + 
self.rbracket.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTypeParameters<'r, 'a> { + type Inflated = TypeParameters<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let lbracket = self.lbracket.inflate(config)?; + let params = self.params.inflate(config)?; + let rbracket = self.rbracket.inflate(config)?; + Ok(Self::Inflated { + params, + lbracket, + rbracket, + }) + } +} + +#[cst_node] +pub struct TypeAlias<'a> { + pub name: Name<'a>, + pub value: Box>, + pub type_parameters: Option>, + + pub whitespace_after_type: SimpleWhitespace<'a>, + pub whitespace_after_name: Option>, + pub whitespace_after_type_parameters: Option>, + pub whitespace_after_equals: SimpleWhitespace<'a>, + pub semicolon: Option>, + + pub(crate) type_tok: TokenRef<'a>, + pub(crate) lbracket_tok: Option>, + pub(crate) equals_tok: TokenRef<'a>, +} + +impl<'a> Codegen<'a> for TypeAlias<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token("type"); + self.whitespace_after_type.codegen(state); + self.name.codegen(state); + if self.whitespace_after_name.is_none() && self.type_parameters.is_none() { + state.add_token(" "); + } else { + self.whitespace_after_name.codegen(state); + } + if self.type_parameters.is_some() { + self.type_parameters.codegen(state); + self.whitespace_after_type_parameters.codegen(state); + } + state.add_token("="); + self.whitespace_after_equals.codegen(state); + self.value.codegen(state); + self.semicolon.codegen(state); + } +} + +impl<'r, 'a> Inflate<'a> for DeflatedTypeAlias<'r, 'a> { + type Inflated = TypeAlias<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let whitespace_after_type = + parse_simple_whitespace(config, &mut self.type_tok.whitespace_after.borrow_mut())?; + let name = self.name.inflate(config)?; + let whitespace_after_name = Some(if let Some(tok) = self.lbracket_tok { + parse_simple_whitespace(config, &mut tok.whitespace_before.borrow_mut()) + } else { + parse_simple_whitespace(config, &mut 
self.equals_tok.whitespace_before.borrow_mut()) + }?); + let type_parameters = self.type_parameters.inflate(config)?; + let whitespace_after_type_parameters = if type_parameters.is_some() { + Some(parse_simple_whitespace( + config, + &mut self.equals_tok.whitespace_before.borrow_mut(), + )?) + } else { + None + }; + let whitespace_after_equals = + parse_simple_whitespace(config, &mut self.equals_tok.whitespace_after.borrow_mut())?; + let value = self.value.inflate(config)?; + let semicolon = self.semicolon.inflate(config)?; + Ok(Self::Inflated { + name, + value, + type_parameters, + whitespace_after_type, + whitespace_after_name, + whitespace_after_type_parameters, + whitespace_after_equals, + semicolon, + }) + } +} + +impl<'r, 'a> DeflatedTypeAlias<'r, 'a> { + pub fn with_semicolon(self, semicolon: Option>) -> Self { + Self { semicolon, ..self } + } +} diff --git a/native/libcst/src/nodes/test_utils.rs b/native/libcst/src/nodes/test_utils.rs new file mode 100644 index 00000000..675b493d --- /dev/null +++ b/native/libcst/src/nodes/test_utils.rs @@ -0,0 +1,42 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use pyo3::prelude::*; + +py_import!("libcst._nodes.deep_equals", "deep_equals", get_deep_equals); + +pub fn repr_or_panic(py: Python, value: T) -> String +where + T: ToPyObject, +{ + value + .to_object(py) + .as_ref(py) + .repr() + .expect("failed to call repr") + .extract() + .expect("repr should've returned str") +} + +pub fn py_assert_deep_equals(py: Python, left: L, right: R) +where + L: ToPyObject, + R: ToPyObject, +{ + let (left, right) = (left.to_object(py), right.to_object(py)); + let equals = get_deep_equals(py) + .expect("failed to import deep_equals") + .call1((&left, &right)) + .expect("failed to call deep_equals") + .extract::() + .expect("deep_equals should return a bool"); + if !equals { + panic!( + "assertion failed: {} was not deeply equal to {}", + repr_or_panic(py, &left), + repr_or_panic(py, &right), + ); + } +} diff --git a/native/libcst/src/nodes/traits.rs b/native/libcst/src/nodes/traits.rs new file mode 100644 index 00000000..c15a60e1 --- /dev/null +++ b/native/libcst/src/nodes/traits.rs @@ -0,0 +1,182 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use crate::{ + nodes::expression::{DeflatedLeftParen, DeflatedRightParen}, + nodes::op::DeflatedComma, + tokenizer::whitespace_parser::{Config, WhitespaceError}, + Codegen, CodegenState, EmptyLine, LeftParen, RightParen, +}; +use std::ops::Deref; + +pub trait WithComma<'r, 'a> { + fn with_comma(self, comma: DeflatedComma<'r, 'a>) -> Self; +} + +pub trait ParenthesizedNode<'a> { + fn lpar(&self) -> &Vec>; + fn rpar(&self) -> &Vec>; + + fn parenthesize(&self, state: &mut CodegenState<'a>, f: F) + where + F: FnOnce(&mut CodegenState<'a>), + { + for lpar in self.lpar() { + lpar.codegen(state); + } + f(state); + for rpar in self.rpar() { + rpar.codegen(state); + } + } + + fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self; +} + +impl<'a, T: ParenthesizedNode<'a>> ParenthesizedNode<'a> for Box { + fn lpar(&self) -> &Vec> { + self.deref().lpar() + } + fn rpar(&self) -> &Vec> { + self.deref().rpar() + } + fn parenthesize(&self, state: &mut CodegenState<'a>, f: F) + where + F: FnOnce(&mut CodegenState<'a>), + { + self.deref().parenthesize(state, f) + } + fn with_parens(self, left: LeftParen<'a>, right: RightParen<'a>) -> Self { + Self::new((*self).with_parens(left, right)) + } +} + +pub trait ParenthesizedDeflatedNode<'r, 'a> { + fn lpar(&self) -> &Vec>; + fn rpar(&self) -> &Vec>; + + fn with_parens( + self, + left: DeflatedLeftParen<'r, 'a>, + right: DeflatedRightParen<'r, 'a>, + ) -> Self; +} +impl<'r, 'a, T: ParenthesizedDeflatedNode<'r, 'a>> ParenthesizedDeflatedNode<'r, 'a> for Box { + fn lpar(&self) -> &Vec> { + self.deref().lpar() + } + fn rpar(&self) -> &Vec> { + self.deref().rpar() + } + fn with_parens( + self, + left: DeflatedLeftParen<'r, 'a>, + right: DeflatedRightParen<'r, 'a>, + ) -> Self { + Self::new((*self).with_parens(left, right)) + } +} + +pub trait WithLeadingLines<'a> { + fn leading_lines(&mut self) 
-> &mut Vec>; +} + +pub type Result = std::result::Result; + +pub trait Inflate<'a> +where + Self: Sized, +{ + type Inflated; + fn inflate(self, config: &Config<'a>) -> Result; +} + +impl<'a, T: Inflate<'a>> Inflate<'a> for Option { + type Inflated = Option; + fn inflate(self, config: &Config<'a>) -> Result { + self.map(|x| x.inflate(config)).transpose() + } +} + +impl<'a, T: Inflate<'a> + ?Sized> Inflate<'a> for Box { + type Inflated = Box; + fn inflate(self, config: &Config<'a>) -> Result { + match (*self).inflate(config) { + Ok(a) => Ok(Box::new(a)), + Err(e) => Err(e), + } + } +} + +impl<'a, T: Inflate<'a>> Inflate<'a> for Vec { + type Inflated = Vec; + fn inflate(self, config: &Config<'a>) -> Result { + self.into_iter().map(|item| item.inflate(config)).collect() + } +} +#[cfg(feature = "py")] +pub mod py { + use pyo3::{types::PyTuple, IntoPyObjectExt, Py, PyAny, PyResult, Python}; + + // TODO: replace with upstream implementation once + // https://github.com/PyO3/pyo3/issues/1813 is resolved + pub trait TryIntoPy: Sized { + fn try_into_py(self, py: Python) -> PyResult; + } + + // I wish: + // impl> TryIntoPy for T { + // fn try_into_py(self, py: Python) -> PyResult { + // Ok(self.into_py(py)) + // } + // } + + impl TryIntoPy> for bool { + fn try_into_py(self, py: Python) -> PyResult> { + self.into_py_any(py) + } + } + + impl>> TryIntoPy> for Box + where + T: TryIntoPy>, + { + fn try_into_py(self, py: Python) -> PyResult> { + (*self).try_into_py(py) + } + } + + impl TryIntoPy> for Option + where + T: TryIntoPy>, + { + fn try_into_py(self, py: Python) -> PyResult> { + Ok(match self { + None => py.None(), + Some(x) => x.try_into_py(py)?, + }) + } + } + + impl TryIntoPy> for Vec + where + T: TryIntoPy>, + { + fn try_into_py(self, py: Python) -> PyResult> { + let converted = self + .into_iter() + .map(|x| x.try_into_py(py)) + .collect::>>()? 
+ .into_iter(); + PyTuple::new(py, converted)?.into_py_any(py) + } + } + + impl<'a> TryIntoPy> for &'a str { + fn try_into_py(self, py: Python) -> PyResult> { + self.into_py_any(py) + } + } +} diff --git a/native/libcst/src/nodes/whitespace.rs b/native/libcst/src/nodes/whitespace.rs new file mode 100644 index 00000000..474ee384 --- /dev/null +++ b/native/libcst/src/nodes/whitespace.rs @@ -0,0 +1,175 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +#[cfg(feature = "py")] +use libcst_derive::TryIntoPy; + +use super::{Codegen, CodegenState}; + +#[derive(Debug, Eq, PartialEq, Default, Clone)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub struct SimpleWhitespace<'a>(pub &'a str); + +impl<'a> Codegen<'a> for SimpleWhitespace<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token(self.0); + } +} + +#[derive(Debug, Eq, PartialEq, Clone)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub struct Comment<'a>(pub &'a str); + +impl<'a> Default for Comment<'a> { + fn default() -> Self { + Self("#") + } +} + +impl<'a> Codegen<'a> for Comment<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + state.add_token(self.0); + } +} + +#[derive(Debug, Eq, PartialEq, Default, Clone)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub struct Newline<'a>(pub Option<&'a str>, pub Fakeness); + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Fakeness { + Fake, + Real, +} + +impl Default for Fakeness { + fn default() -> Self { + Self::Real + } +} + +impl<'a> Codegen<'a> for Newline<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + if let Fakeness::Fake = self.1 { + return; + } + if let Some(value) = self.0 { + state.add_token(value); + } else { + state.add_token(state.default_newline); + } + } +} + +#[derive(Debug, Eq, PartialEq, Default, Clone)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub 
struct TrailingWhitespace<'a> { + pub whitespace: SimpleWhitespace<'a>, + pub comment: Option>, + pub newline: Newline<'a>, +} + +impl<'a> Codegen<'a> for TrailingWhitespace<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.whitespace.codegen(state); + if let Some(comment) = &self.comment { + comment.codegen(state); + } + self.newline.codegen(state); + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub struct EmptyLine<'a> { + pub indent: bool, + pub whitespace: SimpleWhitespace<'a>, + pub comment: Option>, + pub newline: Newline<'a>, +} + +impl<'a> Codegen<'a> for EmptyLine<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + if self.indent { + state.add_indent() + } + self.whitespace.codegen(state); + if let Some(comment) = &self.comment { + comment.codegen(state); + } + self.newline.codegen(state); + } +} + +impl<'a> Default for EmptyLine<'a> { + fn default() -> Self { + Self { + indent: true, + whitespace: Default::default(), + comment: Default::default(), + newline: Default::default(), + } + } +} + +impl<'a> EmptyLine<'a> { + pub fn new( + indent: bool, + whitespace: SimpleWhitespace<'a>, + comment: Option>, + newline: Newline<'a>, + ) -> Self { + Self { + indent, + whitespace, + comment, + newline, + } + } +} + +#[derive(Debug, Eq, PartialEq, Default, Clone)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub struct ParenthesizedWhitespace<'a> { + pub first_line: TrailingWhitespace<'a>, + pub empty_lines: Vec>, + pub indent: bool, + pub last_line: SimpleWhitespace<'a>, +} + +impl<'a> Codegen<'a> for ParenthesizedWhitespace<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + self.first_line.codegen(state); + for line in &self.empty_lines { + line.codegen(state); + } + if self.indent { + state.add_indent() + } + self.last_line.codegen(state); + } +} + +#[derive(Debug, Eq, PartialEq, Clone)] +#[cfg_attr(feature = "py", derive(TryIntoPy))] +pub enum ParenthesizableWhitespace<'a> { + 
SimpleWhitespace(SimpleWhitespace<'a>), + ParenthesizedWhitespace(ParenthesizedWhitespace<'a>), +} + +impl<'a> Codegen<'a> for ParenthesizableWhitespace<'a> { + fn codegen(&self, state: &mut CodegenState<'a>) { + match self { + Self::SimpleWhitespace(w) => w.codegen(state), + Self::ParenthesizedWhitespace(w) => w.codegen(state), + } + } +} + +impl<'a> Default for ParenthesizableWhitespace<'a> { + fn default() -> Self { + Self::SimpleWhitespace(SimpleWhitespace("")) + } +} diff --git a/native/libcst/src/parser/errors.rs b/native/libcst/src/parser/errors.rs new file mode 100644 index 00000000..7fb3b740 --- /dev/null +++ b/native/libcst/src/parser/errors.rs @@ -0,0 +1,72 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use crate::parser::grammar::TokVec; +use crate::tokenizer::whitespace_parser::WhitespaceError; +use crate::tokenizer::TokError; +use peg::Parse; +use thiserror::Error; + +#[allow(clippy::enum_variant_names)] +#[derive(Debug, Error, PartialEq, Eq)] +pub enum ParserError<'a> { + #[error("tokenizer error: {0}")] + TokenizerError(TokError<'a>, &'a str), + #[error("parser error: {0}")] + ParserError( + peg::error::ParseError< as Parse>::PositionRepr>, + &'a str, + ), + #[error(transparent)] + WhitespaceError(#[from] WhitespaceError), + #[error("invalid operator")] + OperatorError, +} + +#[cfg(feature = "py")] +mod py_error { + + use pyo3::types::{IntoPyDict, PyAny, PyAnyMethods, PyModule}; + use pyo3::{Bound, IntoPyObject, PyErr, PyResult, Python}; + + use super::ParserError; + + impl<'a> From> for PyErr { + fn from(e: ParserError) -> Self { + Python::attach(|py| { + let lines = match &e { + ParserError::TokenizerError(_, text) | ParserError::ParserError(_, text) => { + text.lines().collect::>() + } + _ => vec![""], + }; + let (mut line, mut col) = match &e { + ParserError::ParserError(err, ..) 
=> { + (err.location.start_pos.line, err.location.start_pos.column) + } + _ => (0, 0), + }; + if line + 1 > lines.len() { + line = lines.len() - 1; + col = 0; + } + match || -> PyResult> { + let kwargs = [ + ("message", e.to_string().into_pyobject(py)?.into_any()), + ("lines", lines.into_pyobject(py)?.into_any()), + ("raw_line", (line + 1).into_pyobject(py)?.into_any()), + ("raw_column", col.into_pyobject(py)?.into_any()), + ] + .into_py_dict(py)?; + let libcst = PyModule::import(py, "libcst")?; + libcst.getattr("ParserSyntaxError")?.call((), Some(&kwargs)) + }() { + Ok(py_err_value) => PyErr::from_value(py_err_value), + Err(e) => e, + } + }) + } + } +} diff --git a/native/libcst/src/parser/grammar.rs b/native/libcst/src/parser/grammar.rs new file mode 100644 index 00000000..86823961 --- /dev/null +++ b/native/libcst/src/parser/grammar.rs @@ -0,0 +1,3596 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use std::rc::Rc; + +use crate::expression::make_async; +use crate::nodes::deflated::*; +use crate::nodes::expression::make_fstringtext; +use crate::nodes::expression::make_tstringtext; +use crate::nodes::op::make_importstar; +use crate::nodes::traits::ParenthesizedDeflatedNode; +use crate::parser::ParserError; +use crate::tokenizer::{TokType, Token}; +use crate::WithComma; +use peg::str::LineCol; +use peg::{parser, Parse, ParseElem, RuleResult}; +use TokType::{ + Async, Await as AWAIT, Dedent, EndMarker, FStringEnd, FStringStart, FStringString, Indent, + Name as NameTok, Newline as NL, Number, String as STRING, TStringEnd, TStringStart, + TStringString, +}; + +pub type Result<'a, T> = std::result::Result>; +type GrammarResult = std::result::Result; + +#[derive(Debug)] +pub struct TokVec<'a>(Vec>>); + +impl<'a> std::convert::From>> for TokVec<'a> { + fn from(vec: Vec>) -> Self { + TokVec(vec.into_iter().map(Rc::new).collect()) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseLoc { + pub start_pos: LineCol, + pub end_pos: LineCol, +} + +impl std::fmt::Display for ParseLoc { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.start_pos.fmt(f) + } +} + +impl<'a> Parse for TokVec<'a> { + type PositionRepr = ParseLoc; + + fn start(&self) -> usize { + 0 + } + + fn is_eof(&self, pos: usize) -> bool { + pos >= self.0.len() + } + + fn position_repr(&self, pos: usize) -> Self::PositionRepr { + let tok = self.0.get(pos).unwrap_or_else(|| self.0.last().unwrap()); + ParseLoc { + start_pos: LineCol { + line: tok.start_pos.line_number(), + column: tok.start_pos.char_column_number(), + offset: tok.start_pos.byte_idx(), + }, + end_pos: LineCol { + line: tok.end_pos.line_number(), + column: tok.end_pos.char_column_number(), + offset: tok.end_pos.byte_idx(), + }, + } + } +} + +type TokenRef<'input, 'a> = &'input Token<'a>; + +impl<'input, 'a: 'input> ParseElem<'input> for TokVec<'a> { + type Element = TokenRef<'input, 'a>; + + fn 
parse_elem(&'input self, pos: usize) -> RuleResult { + match self.0.get(pos) { + Some(tok) => RuleResult::Matched(pos + 1, tok), + None => RuleResult::Failed, + } + } +} + +const MAX_RECURSION_DEPTH: usize = 3000; + +parser! { + pub grammar python<'a>(input: &'a str) for TokVec<'a> { + + // Starting Rules + + pub rule file(encoding: Option<&str>) -> Module<'input, 'a> + = traced(<_file(encoding.unwrap_or("utf-8"))>) + + pub rule expression_input() -> Expression<'input, 'a> + = traced() + + pub rule statement_input() -> Statement<'input, 'a> + = traced() + + rule _file(encoding: &str) -> Module<'input, 'a> + = s:statements()? eof:tok(EndMarker, "EOF") { + make_module(s.unwrap_or_default(), eof, encoding) + } + + // General statements + + rule statements() -> Vec> + = statement()+ + + rule statement() -> Statement<'input, 'a> + = c:compound_stmt() { Statement::Compound(c) } + / s:simple_stmts() { + Statement::Simple(make_simple_statement_line(s)) + } + + rule simple_stmts() -> SimpleStatementParts<'input, 'a> + = first_tok:&_ stmts:separated_trailer(, ) nl:tok(NL, "NEWLINE") { + SimpleStatementParts { + first_tok, + first_statement: stmts.0, + rest: stmts.1, + last_semi: stmts.2, + nl, + } + } + + #[cache] + rule simple_stmt() -> SmallStatement<'input, 'a> + = assignment() + / &lit("type") s: type_stmt() {SmallStatement::TypeAlias(s)} + / e:star_expressions() { SmallStatement::Expr(Expr { value: e, semicolon: None }) } + / &lit("return") s:return_stmt() { SmallStatement::Return(s) } + // this is expanded from the original grammar's import_stmt rule + / &lit("import") i:import_name() { SmallStatement::Import(i) } + / &lit("from") i:import_from() { SmallStatement::ImportFrom(i) } + / &lit("raise") r:raise_stmt() { SmallStatement::Raise(r) } + / lit("pass") { SmallStatement::Pass(Pass { semicolon: None }) } + / &lit("del") s:del_stmt() { SmallStatement::Del(s) } + / &lit("yield") s:yield_stmt() { SmallStatement::Expr(Expr { value: s, semicolon: None }) } + / 
&lit("assert") s:assert_stmt() {SmallStatement::Assert(s)} + / lit("break") { SmallStatement::Break(Break { semicolon: None })} + / lit("continue") { SmallStatement::Continue(Continue { semicolon: None })} + / &lit("global") s:global_stmt() {SmallStatement::Global(s)} + / &lit("nonlocal") s:nonlocal_stmt() {SmallStatement::Nonlocal(s)} + + + rule compound_stmt() -> CompoundStatement<'input, 'a> + = &(lit("def") / lit("@") / tok(Async, "ASYNC")) f:function_def() { + CompoundStatement::FunctionDef(f) + } + / &lit("if") f:if_stmt() { CompoundStatement::If(f) } + / &(lit("class") / lit("@")) c:class_def() { CompoundStatement::ClassDef(c) } + / &(lit("with") / tok(Async, "ASYNC")) w:with_stmt() { CompoundStatement::With(w) } + / &(lit("for") / tok(Async, "ASYNC")) f:for_stmt() { CompoundStatement::For(f) } + / &lit("try") t:try_stmt() { CompoundStatement::Try(t) } + / &lit("try") t:try_star_stmt() { CompoundStatement::TryStar(t) } + / &lit("while") w:while_stmt() { CompoundStatement::While(w) } + / m:match_stmt() { CompoundStatement::Match(m) } + + // Simple statements + + rule assignment() -> SmallStatement<'input, 'a> + = a:name() col:lit(":") ann:expression() + rhs:(eq:lit("=") d:annotated_rhs() {(eq, d)})? { + SmallStatement::AnnAssign(make_ann_assignment( + AssignTargetExpression::Name(Box::new(a)), col, ann, rhs)) + } + // TODO: there's an extra '(' single_target ')' clause here in upstream + / a:single_subscript_attribute_target() col:lit(":") ann:expression() + rhs:(eq:lit("=") d:annotated_rhs() {(eq, d)})? 
{ + SmallStatement::AnnAssign(make_ann_assignment(a, col, ann, rhs)) + } + / lhs:(t:star_targets() eq:lit("=") {(t, eq)})+ rhs:(yield_expr() / star_expressions()) !lit("=") { + SmallStatement::Assign(make_assignment(lhs, rhs)) + } + / t:single_target() op:augassign() rhs:(yield_expr() / star_expressions()) { + SmallStatement::AugAssign(make_aug_assign(t, op, rhs)) + } + + rule annotated_rhs() -> Expression<'input, 'a> + = yield_expr() / star_expressions() + + rule augassign() -> AugOp<'input, 'a> + = &(lit("+=") + / lit("-=") + / lit("*=") + / lit("@=") + / lit("/=") + / lit("%=") + / lit("&=") + / lit("|=") + / lit("^=") + / lit("<<=") + / lit(">>=") + / lit("**=") + / lit("//=")) tok:_ {? + make_aug_op(tok).map_err(|_| "aug_op") + } + + rule return_stmt() -> Return<'input, 'a> + = kw:lit("return") a:star_expressions()? { + make_return(kw, a) + } + + rule raise_stmt() -> Raise<'input, 'a> + = kw:lit("raise") exc:expression() + rest:(f:lit("from") cau:expression() {(f, cau)})? { + make_raise(kw, Some(exc), rest) + } + / kw:lit("raise") { + make_raise(kw, None, None) + } + + rule global_stmt() -> Global<'input, 'a> + = kw:lit("global") init:(n:name() c:comma() {(n, c)})* last:name() { + make_global(kw, init, last) + } + + rule nonlocal_stmt() -> Nonlocal<'input, 'a> + = kw:lit("nonlocal") init:(n:name() c:comma() {(n, c)})* last:name() { + make_nonlocal(kw, init, last) + } + + rule del_stmt() -> Del<'input, 'a> + = kw:lit("del") t:del_target() &(lit(";") / tok(NL, "NEWLINE")) { + make_del(kw, t) + } + / kw:lit("del") t:del_targets() &(lit(";") / tok(NL, "NEWLINE")) { + make_del(kw, make_del_tuple(None, t, None)) + } + + rule yield_stmt() -> Expression<'input, 'a> + = yield_expr() + + rule assert_stmt() -> Assert<'input, 'a> + = kw:lit("assert") test:expression() rest:(c:comma() msg:expression() {(c, msg)})? 
{ + make_assert(kw, test, rest) + } + + // Import statements + + rule import_name() -> Import<'input, 'a> + = kw:lit("import") a:dotted_as_names() { + make_import(kw, a) + } + + rule import_from() -> ImportFrom<'input, 'a> + = from:lit("from") dots:dots()? m:dotted_name() + import:lit("import") als:import_from_targets() { + make_import_from(from, dots.unwrap_or_default(), Some(m), import, als) + } + / from:lit("from") dots:dots() + import:lit("import") als:import_from_targets() { + make_import_from(from, dots, None, import, als) + } + + rule import_from_targets() -> ParenthesizedImportNames<'input, 'a> + = lpar:lpar() als:import_from_as_names() c:comma()? rpar:rpar() { + let mut als = als; + if let (comma@Some(_), Some(mut last)) = (c, als.last_mut()) { + last.comma = comma; + } + (Some(lpar), ImportNames::Aliases(als), Some(rpar)) + } + / als:import_from_as_names() !lit(",") { (None, ImportNames::Aliases(als), None)} + / star:lit("*") { (None, ImportNames::Star(make_importstar()), None) } + + rule import_from_as_names() -> Vec> + = items:separated(, ) { + make_import_from_as_names(items.0, items.1) + } + + rule import_from_as_name() -> ImportAlias<'input, 'a> + = n:name() asname:(kw:lit("as") z:name() {(kw, z)})? { + make_import_alias(NameOrAttribute::N(Box::new(n)), asname) + } + + rule dotted_as_names() -> Vec> + = init:(d:dotted_as_name() c:comma() {d.with_comma(c)})* + last:dotted_as_name() { + concat(init, vec![last]) + } + + rule dotted_as_name() -> ImportAlias<'input, 'a> + = n:dotted_name() asname:(kw:lit("as") z:name() {(kw, z)})? { + make_import_alias(n, asname) + } + + // TODO: why does this diverge from CPython? 
+ rule dotted_name() -> NameOrAttribute<'input, 'a> + = first:name() tail:(dot:lit(".") n:name() {(dot, n)})* { + make_name_or_attr(first, tail) + } + + // Compound statements + + // Common elements + + #[cache] + rule block() -> Suite<'input, 'a> + = n:tok(NL, "NEWLINE") ind:tok(Indent, "INDENT") s:statements() ded:tok(Dedent, "DEDENT") { + make_indented_block(n, ind, s, ded) + } + / s:simple_stmts() { + make_simple_statement_suite(s) + } + + rule decorators() -> Vec> + = (at:lit("@") e:named_expression() nl:tok(NL, "NEWLINE") { + make_decorator(at, e, nl) + } )+ + + // Class definitions + + rule class_def() -> ClassDef<'input, 'a> + = d:decorators() c:class_def_raw() { c.with_decorators(d) } + / class_def_raw() + + rule class_def_raw() -> ClassDef<'input, 'a> + = kw:lit("class") n:name() t:type_params()? arg:(l:lpar() a:arguments()? r:rpar() {(l, a, r)})? + col:lit(":") b:block() {? + make_class_def(kw, n, t, arg, col, b) + } + + // Function definitions + + rule function_def() -> FunctionDef<'input, 'a> + = d:decorators() f:function_def_raw() {f.with_decorators(d)} + / function_def_raw() + + rule _returns() -> Annotation<'input, 'a> + = l:lit("->") e:expression() { + make_annotation(l, e) + } + + rule function_def_raw() -> FunctionDef<'input, 'a> + = def:lit("def") n:name() t:type_params()? op:lit("(") params:params()? + cp:lit(")") ty:_returns()? c:lit(":") b:block() { + make_function_def(None, def, n, t, op, params, cp, ty, c, b) + } + / asy:tok(Async, "ASYNC") def:lit("def") n:name() t:type_params()? op:lit("(") params:params()? + cp:lit(")") ty:_returns()? c:lit(":") b:block() { + make_function_def(Some(asy), def, n, t, op, params, cp, ty, c, b) + } + + // Function parameters + + rule params() -> Parameters<'input, 'a> + = parameters() + + rule parameters() -> Parameters<'input, 'a> + = a:slash_no_default() b:param_no_default()* c:param_with_default()* d:star_etc()? 
{ + make_parameters(Some(a), concat(b, c), d) + } + / a:slash_with_default() b:param_with_default()* d:star_etc()? { + make_parameters(Some(a), b, d) + } + / a:param_no_default()+ b:param_with_default()* d:star_etc()? { + make_parameters(None, concat(a, b), d) + } + / a:param_with_default()+ d:star_etc()? { + make_parameters(None, a, d) + } + / d:star_etc() { + make_parameters(None, vec![], Some(d)) + } + + rule slash_no_default() -> (Vec>, ParamSlash<'input, 'a>) + = a:param_no_default()+ tok:lit("/") com:comma() { + (a, ParamSlash { comma: Some(com), tok }) + } + / a:param_no_default()+ tok:lit("/") &lit(")") { + (a, ParamSlash { comma: None, tok }) + } + + rule slash_with_default() -> (Vec>, ParamSlash<'input, 'a>) + = a:param_no_default()* b:param_with_default()+ tok:lit("/") c:comma() { + (concat(a, b), ParamSlash { comma: Some(c), tok }) + } + / a:param_no_default()* b:param_with_default()+ tok:lit("/") &lit(")") { + (concat(a, b), ParamSlash { comma: None, tok }) + } + + rule star_etc() -> StarEtc<'input, 'a> + = star:lit("*") a:param_no_default() b:param_maybe_default()* kw:kwds()? { + StarEtc(Some(StarArg::Param(Box::new( + add_param_star(a, star)))), b, kw) + } + / star:lit("*") a:param_no_default_star_annotation() b:param_maybe_default()* kw:kwds()? { + StarEtc(Some(StarArg::Param(Box::new( + add_param_star(a, star)))), b, kw) + } + / lit("*") c:comma() b:param_maybe_default()+ kw:kwds()? 
{ + StarEtc(Some(StarArg::Star(Box::new(ParamStar {comma:c }))), b, kw) + } + / kw:kwds() { StarEtc(None, vec![], Some(kw)) } + + rule kwds() -> Param<'input, 'a> + = star:lit("**") a:param_no_default() { + add_param_star(a, star) + } + + rule param_no_default() -> Param<'input, 'a> + = a:param() c:lit(",") { add_param_default(a, None, Some(c)) } + / a:param() &lit(")") {a} + + rule param_no_default_star_annotation() -> Param<'input, 'a> + = a:param_star_annotation() c:lit(",") { add_param_default(a, None, Some(c))} + / a:param_star_annotation() &lit(")") {a} + + rule param_with_default() -> Param<'input, 'a> + = a:param() def:default() c:lit(",") { + add_param_default(a, Some(def), Some(c)) + } + / a:param() def:default() &lit(")") { + add_param_default(a, Some(def), None) + } + + rule param_maybe_default() -> Param<'input, 'a> + = a:param() def:default()? c:lit(",") { + add_param_default(a, def, Some(c)) + } + / a:param() def:default()? &lit(")") { + add_param_default(a, def, None) + } + + rule param() -> Param<'input, 'a> + = n:name() a:annotation()? 
{ + Param {name: n, annotation: a, ..Default::default() } + } + + rule param_star_annotation() -> Param<'input, 'a> + = n:name() a:star_annotation() { + Param {name: n, annotation: Some(a), ..Default::default() } + } + + rule annotation() -> Annotation<'input, 'a> + = col:lit(":") e:expression() { + make_annotation(col, e) + } + + rule star_annotation() -> Annotation<'input, 'a> + = col:lit(":") e:star_expression() { + make_annotation(col, e) + } + + rule default() -> (AssignEqual<'input, 'a>, Expression<'input, 'a>) + = eq:lit("=") ex:expression() { + (make_assign_equal(eq), ex) + } + + rule default_or_starred() -> (AssignEqual<'input, 'a>,Option>, Expression<'input, 'a>) + = eq:lit("=") ex:expression() { + (make_assign_equal(eq), None , ex) + } + / eq:lit("=") star:lit("*") ex:expression() { + // make_star_default(eq, star, ex) + (make_assign_equal(eq), Some(star) , ex) + } + + // If statement + + rule if_stmt() -> If<'input, 'a> + = i:lit("if") a:named_expression() col:lit(":") b:block() elif:elif_stmt() { + make_if(i, a, col, b, Some(OrElse::Elif(elif)), false) + } + / i:lit("if") a:named_expression() col:lit(":") b:block() el:else_block()? { + make_if(i, a, col, b, el.map(OrElse::Else), false) + } + + rule elif_stmt() -> If<'input, 'a> + = i:lit("elif") a:named_expression() col:lit(":") b:block() elif:elif_stmt() { + make_if(i, a, col, b, Some(OrElse::Elif(elif)), true) + } + / i:lit("elif") a:named_expression() col:lit(":") b:block() el:else_block()? { + make_if(i, a, col, b, el.map(OrElse::Else), true) + } + + rule else_block() -> Else<'input, 'a> + = el:lit("else") col:lit(":") b:block() { + make_else(el, col, b) + } + + // While statement + + rule while_stmt() -> While<'input, 'a> + = kw:lit("while") test:named_expression() col:lit(":") b:block() el:else_block()? 
{ + make_while(kw, test, col, b, el) + } + + // For statement + + rule for_stmt() -> For<'input, 'a> + = f:lit("for") t:star_targets() i:lit("in") it:star_expressions() + c:lit(":") b:block() el:else_block()? { + make_for(None, f, t, i, it, c, b, el) + } + / asy:tok(Async, "ASYNC") f:lit("for") t:star_targets() i:lit("in") + it:star_expressions() + c:lit(":") b:block() el:else_block()? { + make_for(Some(asy), f, t, i, it, c, b, el) + } + + // With statement + + rule with_stmt() -> With<'input, 'a> + = kw:lit("with") l:lpar() items:separated_trailer(, ) r:rpar() + col:lit(":") b:block() { + make_with(None, kw, Some(l), comma_separate(items.0, items.1, items.2), Some(r), col, b) + } + / kw:lit("with") items:separated(, ) + col:lit(":") b:block() { + make_with(None, kw, None, comma_separate(items.0, items.1, None), None, col, b) + } + / asy:tok(Async, "ASYNC") kw:lit("with") l:lpar() items:separated_trailer(, ) r:rpar() + col:lit(":") b:block() { + make_with(Some(asy), kw, Some(l), comma_separate(items.0, items.1, items.2), Some(r), col, b) + } + / asy:tok(Async, "ASYNC") kw:lit("with") items:separated(, ) + col:lit(":") b:block() { + make_with(Some(asy), kw, None, comma_separate(items.0, items.1, None), None, col, b) + } + + rule with_item() -> WithItem<'input, 'a> + = e:expression() a:lit("as") t:star_target() &(lit(",") / lit(":") / rpar()) { + make_with_item(e, Some(a), Some(t)) + } + / e:expression() { + make_with_item(e, None, None) + } + + // Try statement + + rule try_stmt() -> Try<'input, 'a> + = kw:lit("try") lit(":") b:block() f:finally_block() { + make_try(kw, b, vec![], None, Some(f)) + } + / kw:lit("try") lit(":") b:block() ex:except_block()+ el:else_block()? + f:finally_block()? { + make_try(kw, b, ex, el, f) + } + + // Note: this is separate because TryStar is a different type in LibCST + rule try_star_stmt() -> TryStar<'input, 'a> + = kw:lit("try") lit(":") b:block() ex:except_star_block()+ + el:else_block()? f:finally_block()? 
{ + make_try_star(kw, b, ex, el, f) + } + + // Except statement + rule except_block() -> ExceptHandler<'input, 'a> + = kw:lit("except") e:expression() a:(k:lit("as") n:name() {(k, n)})? + col:lit(":") b:block() { + make_except(kw, Some(e), a, col, b) + } + / kw:lit("except") e:expression() other:(c:comma() ex:expression() {(c, ex)})+ tc:(c:comma())? + col:lit(":") b:block() { + let tuple = Expression::Tuple(Box::new(Tuple { + elements: comma_separate(expr_to_element(e), other.into_iter().map(|(comma, expr)| (comma, expr_to_element(expr))).collect(), tc), + lpar: vec![], + rpar: vec![], + })); + + make_except(kw, Some(tuple), None, col, b) + } + / kw:lit("except") col:lit(":") b:block() { + make_except(kw, None, None, col, b) + } + + rule except_star_block() -> ExceptStarHandler<'input, 'a> + = kw:lit("except") star:lit("*") e:expression() + a:(k:lit("as") n:name() {(k, n)})? col:lit(":") b:block() { + make_except_star(kw, star, e, a, col, b) + } + / kw:lit("except") star:lit("*") e:expression() other:(c:comma() ex:expression() {(c, ex)})+ tc:(c:comma())? + col:lit(":") b:block() { + let tuple = Expression::Tuple(Box::new(Tuple { + elements: comma_separate(expr_to_element(e), other.into_iter().map(|(comma, expr)| (comma, expr_to_element(expr))).collect(), tc), + lpar: vec![], + rpar: vec![], + })); + + make_except_star(kw, star, tuple, None, col, b) + } + + rule finally_block() -> Finally<'input, 'a> + = kw:lit("finally") col:lit(":") b:block() { + make_finally(kw, col, b) + } + + + // Match statement + + rule match_stmt() -> Match<'input, 'a> + = kw:lit("match") subject:subject_expr() col:lit(":") tok(NL, "NEWLINE") + i:tok(Indent, "INDENT") cases:case_block()+ d:tok(Dedent, "DEDENT") { + make_match(kw, subject, col, i, cases, d) + } + + rule subject_expr() -> Expression<'input, 'a> + = first:star_named_expression() c:comma() rest:star_named_expressions()? 
{ + Expression::Tuple(Box::new( + make_tuple_from_elements(first.with_comma(c), rest.unwrap_or_default())) + ) + } + / named_expression() + + rule case_block() -> MatchCase<'input, 'a> + = kw:lit("case") pattern:patterns() guard:guard()? col:lit(":") body:block() { + make_case(kw, pattern, guard, col, body) + } + + rule guard() -> (TokenRef<'input, 'a>, Expression<'input, 'a>) + = kw:lit("if") exp:named_expression() { (kw, exp) } + + rule patterns() -> MatchPattern<'input, 'a> + = pats:open_sequence_pattern() { + MatchPattern::Sequence(make_list_pattern(None, pats, None)) + } + / pattern() + + rule pattern() -> MatchPattern<'input, 'a> + = as_pattern() + / or_pattern() + + rule as_pattern() -> MatchPattern<'input, 'a> + = pat:or_pattern() kw:lit("as") target:pattern_capture_target() { + make_as_pattern(Some(pat), Some(kw), Some(target)) + } + + rule or_pattern() -> MatchPattern<'input, 'a> + = pats:separated(, ) { + make_or_pattern(pats.0, pats.1) + } + + rule closed_pattern() -> MatchPattern<'input, 'a> + = literal_pattern() + / capture_pattern() + / wildcard_pattern() + / value_pattern() + / group_pattern() + / sequence_pattern() + / mapping_pattern() + / class_pattern() + + rule literal_pattern() -> MatchPattern<'input, 'a> + = val:signed_number() !(lit("+") / lit("-")) { make_match_value(val) } + / val:complex_number() { make_match_value(val) } + / val:strings() { make_match_value(val.into()) } + / n:lit("None") { make_match_singleton(make_name(n)) } + / n:lit("True") { make_match_singleton(make_name(n)) } + / n:lit("False") { make_match_singleton(make_name(n)) } + + rule literal_expr() -> Expression<'input, 'a> + = val:signed_number() !(lit("+") / lit("-")) { val } + / val:complex_number() { val } + / val:strings() { val.into() } + / n:lit("None") { Expression::Name(Box::new(make_name(n))) } + / n:lit("True") { Expression::Name(Box::new(make_name(n))) } + / n:lit("False") { Expression::Name(Box::new(make_name(n))) } + + rule complex_number() -> 
Expression<'input, 'a> + = re:signed_real_number() op:(lit("+")/lit("-")) im:imaginary_number() {? + make_binary_op(re, op, im).map_err(|_| "complex number") + } + + rule signed_number() -> Expression<'input, 'a> + = n:tok(Number, "number") { make_number(n) } + / op:lit("-") n:tok(Number, "number") {? + make_unary_op(op, make_number(n)).map_err(|_| "signed number") + } + + rule signed_real_number() -> Expression<'input, 'a> + = real_number() + / op:lit("-") n:real_number() {? + make_unary_op(op, n).map_err(|_| "signed real number") + } + + rule real_number() -> Expression<'input, 'a> + = n:tok(Number, "number") {? ensure_real_number(n) } + + rule imaginary_number() -> Expression<'input, 'a> + = n:tok(Number, "number") {? ensure_imaginary_number(n) } + + rule capture_pattern() -> MatchPattern<'input, 'a> + = t:pattern_capture_target() { make_as_pattern(None, None, Some(t)) } + + rule pattern_capture_target() -> Name<'input, 'a> + = !lit("_") n:name() !(lit(".") / lit("(") / lit("=")) { n } + + rule wildcard_pattern() -> MatchPattern<'input, 'a> + = lit("_") { make_as_pattern(None, None, None) } + + rule value_pattern() -> MatchPattern<'input, 'a> + = v:attr() !(lit(".") / lit("(") / lit("=")) { + make_match_value(v.into()) + } + + // In upstream attr and name_or_attr are mutually recursive, but rust-peg + // doesn't support this yet. + rule attr() -> NameOrAttribute<'input, 'a> + = &(name() lit(".")) v:name_or_attr() { v } + + #[cache_left_rec] + rule name_or_attr() -> NameOrAttribute<'input, 'a> + = val:name_or_attr() d:lit(".") attr:name() { + NameOrAttribute::A(Box::new(make_attribute(val.into(), d, attr))) + } + / n:name() { NameOrAttribute::N(Box::new(n)) } + + rule group_pattern() -> MatchPattern<'input, 'a> + = l:lpar() pat:pattern() r:rpar() { pat.with_parens(l, r) } + + rule sequence_pattern() -> MatchPattern<'input, 'a> + = l:lbrak() pats:maybe_sequence_pattern()? 
r:rbrak() { + MatchPattern::Sequence( + make_list_pattern(Some(l), pats.unwrap_or_default(), Some(r)) + ) + } + / l:lpar() pats:open_sequence_pattern()? r:rpar() { + MatchPattern::Sequence(make_tuple_pattern(l, pats.unwrap_or_default(), r)) + } + + rule open_sequence_pattern() -> Vec> + = pat:maybe_star_pattern() c:comma() pats:maybe_sequence_pattern()? { + make_open_sequence_pattern(pat, c, pats.unwrap_or_default()) + } + + rule maybe_sequence_pattern() -> Vec> + = pats:separated_trailer(, ) { + comma_separate(pats.0, pats.1, pats.2) + } + + rule maybe_star_pattern() -> StarrableMatchSequenceElement<'input, 'a> + = s:star_pattern() { StarrableMatchSequenceElement::Starred(s) } + / p:pattern() { + StarrableMatchSequenceElement::Simple( + make_match_sequence_element(p) + ) + } + + rule star_pattern() -> MatchStar<'input, 'a> + = star:lit("*") t:pattern_capture_target() {make_match_star(star, Some(t))} + / star:lit("*") t:wildcard_pattern() { make_match_star(star, None) } + + rule mapping_pattern() -> MatchPattern<'input, 'a> + = l:lbrace() r:rbrace() { + make_match_mapping(l, vec![], None, None, None, None, r) + } + / l:lbrace() rest:double_star_pattern() trail:comma()? r:rbrace() { + make_match_mapping(l, vec![], None, Some(rest.0), Some(rest.1), trail, r) + } + / l:lbrace() items:items_pattern() c:comma() rest:double_star_pattern() + trail:comma()? r:rbrace() { + make_match_mapping(l, items, Some(c), Some(rest.0), Some(rest.1), trail, r) + } + / l:lbrace() items:items_pattern() trail:comma()? 
r:rbrace() { + make_match_mapping(l, items, trail, None, None, None, r) + } + + rule items_pattern() -> Vec> + = pats:separated(, ) { + comma_separate(pats.0, pats.1, None) + } + + rule key_value_pattern() -> MatchMappingElement<'input, 'a> + = key:(literal_expr() / a:attr() {a.into()}) colon:lit(":") pat:pattern() { + make_match_mapping_element(key, colon, pat) + } + + rule double_star_pattern() -> (TokenRef<'input, 'a>, Name<'input, 'a>) + = star:lit("**") n:pattern_capture_target() { (star, n) } + + rule class_pattern() -> MatchPattern<'input, 'a> + = cls:name_or_attr() l:lit("(") r:lit(")") { + make_class_pattern(cls, l, vec![], None, vec![], None, r) + } + / cls:name_or_attr() l:lit("(") pats:positional_patterns() c:comma()? r:lit(")") { + make_class_pattern(cls, l, pats, c, vec![], None, r) + } + / cls:name_or_attr() l:lit("(") kwds:keyword_patterns() c:comma()? r:lit(")") { + make_class_pattern(cls, l, vec![], None, kwds, c, r) + } + / cls:name_or_attr() l:lit("(") pats:positional_patterns() c:comma() + kwds:keyword_patterns() trail:comma()? r:lit(")") { + make_class_pattern(cls, l, pats, Some(c), kwds, trail, r) + } + + rule positional_patterns() -> Vec> + = pats:separated(, ) { + comma_separate(pats.0, pats.1, None) + } + + rule keyword_patterns() -> Vec> + = pats:separated(, ) { + comma_separate(pats.0, pats.1, None) + } + + rule keyword_pattern() -> MatchKeywordElement<'input, 'a> + = arg:name() eq:lit("=") value:pattern() { + make_match_keyword_element(arg, eq, value) + } + + // Type statement + + rule type_stmt() -> TypeAlias<'input, 'a> + = t:lit("type") n:name() ps:type_params()? eq:lit("=") v:expression() { + make_type_alias(t, n, ps, eq, v) + } + + // Type parameter declaration + + rule type_params() -> TypeParameters<'input, 'a> + = lb:lbrak() ps:separated_trailer(, ) rb:rbrak() { + make_type_parameters(lb, comma_separate(ps.0, ps.1, ps.2), rb) + } + + rule type_param() -> TypeParam<'input, 'a> + = n:name() b:type_param_bound()? def:default()? 
{ make_type_var(n, b, def) } + / s:lit("*") n:name() def:default_or_starred()? { make_type_var_tuple(s, n, def) } + / s:lit("**") n:name() def:default()? { make_param_spec(s, n, def) } + + + rule type_param_bound() -> TypeParamBound<'input, 'a> + = c:lit(":") e:expression() { make_type_param_bound(c, e) } + // Expressions + + #[cache] + rule expression() -> Expression<'input, 'a> + = _conditional_expression() + / lambdef() + + rule _conditional_expression() -> Expression<'input, 'a> + = body:disjunction() i:lit("if") test:disjunction() e:lit("else") oe:expression() { + Expression::IfExp(Box::new(make_ifexp(body, i, test, e, oe))) + } + / disjunction() + + rule yield_expr() -> Expression<'input, 'a> + = y:lit("yield") f:lit("from") a:expression() { + Expression::Yield(Box::new(make_yield(y, Some(f), Some(a)))) + } + / y:lit("yield") a:star_expressions()? { + Expression::Yield(Box::new(make_yield(y, None, a))) + } + + rule star_expressions() -> Expression<'input, 'a> + = first:star_expression() + rest:(comma:comma() e:star_expression() { (comma, expr_to_element(e)) })+ + comma:comma()? 
{ + Expression::Tuple(Box::new(make_tuple(expr_to_element(first), rest, comma, None, None))) + } + / e:star_expression() comma:comma() { + Expression::Tuple(Box::new(make_tuple(expr_to_element(e), vec![], Some(comma), None, None))) + } + / star_expression() + + #[cache] + rule star_expression() -> Expression<'input, 'a> + = star:lit("*") e:bitwise_or() { + Expression::StarredElement(Box::new(make_starred_element(star, expr_to_element(e)))) + } + / expression() + + rule star_named_expressions() -> Vec> + = exps:separated_trailer(, ) { + comma_separate(exps.0, exps.1, exps.2) + } + + rule star_named_expression() -> Element<'input, 'a> + = star:lit("*") e:bitwise_or() { + Element::Starred(Box::new(make_starred_element(star, expr_to_element(e)))) + } + / e:named_expression() { expr_to_element(e) } + + rule named_expression() -> Expression<'input, 'a> + = a:name() op:lit(":=") b:expression() { + Expression::NamedExpr(Box::new(make_named_expr(a, op, b))) + } + / e:expression() !lit(":=") { e } + + #[cache] + rule disjunction() -> Expression<'input, 'a> + = a:conjunction() b:(or:lit("or") inner:conjunction() { (or, inner) })+ {? + make_boolean_op(a, b).map_err(|e| "expected disjunction") + } + / conjunction() + + #[cache] + rule conjunction() -> Expression<'input, 'a> + = a:inversion() b:(and:lit("and") inner:inversion() { (and, inner) })+ {? + make_boolean_op(a, b).map_err(|e| "expected conjunction") + } + / inversion() + + #[cache] + rule inversion() -> Expression<'input, 'a> + = not:lit("not") a:inversion() {? + make_unary_op(not, a).map_err(|e| "expected inversion") + } + / comparison() + + // Comparison operators + + #[cache] + rule comparison() -> Expression<'input, 'a> + = a:bitwise_or() b:compare_op_bitwise_or_pair()+ { make_comparison(a, b) } + / bitwise_or() + + // This implementation diverges slightly from CPython (3.9) to avoid bloating + // the parser cache and increase readability. 
+ #[cache] + rule compare_op_bitwise_or_pair() -> (CompOp<'input, 'a>, Expression<'input, 'a>) + = _op_bitwise_or("==") + / _op_bitwise_or("!=") // TODO: support barry_as_flufl + / _op_bitwise_or("<=") + / _op_bitwise_or("<") + / _op_bitwise_or(">=") + / _op_bitwise_or(">") + / _op_bitwise_or2("not", "in") + / _op_bitwise_or("in") + / _op_bitwise_or2("is", "not") + / _op_bitwise_or("is") + + rule _op_bitwise_or(o: &'static str) -> (CompOp<'input, 'a>, Expression<'input, 'a>) + = op:lit(o) e:bitwise_or() {? + make_comparison_operator(op) + .map(|op| (op, e)) + .map_err(|_| "comparison") + } + + rule _op_bitwise_or2(first: &'static str, second: &'static str) -> (CompOp<'input, 'a>, Expression<'input, 'a>) + = f:lit(first) s:lit(second) e:bitwise_or() {? + make_comparison_operator_2(f, s) + .map(|op| (op, e)) + .map_err(|_| "comparison") + } + + #[cache_left_rec] + rule bitwise_or() -> Expression<'input, 'a> + = a:bitwise_or() op:lit("|") b:bitwise_xor() {? + make_binary_op(a, op, b).map_err(|e| "expected bitwise_or") + } + / bitwise_xor() + + #[cache_left_rec] + rule bitwise_xor() -> Expression<'input, 'a> + = a:bitwise_xor() op:lit("^") b:bitwise_and() {? + make_binary_op(a, op, b).map_err(|e| "expected bitwise_xor") + } + / bitwise_and() + + #[cache_left_rec] + rule bitwise_and() -> Expression<'input, 'a> + = a:bitwise_and() op:lit("&") b:shift_expr() {? + make_binary_op(a, op, b).map_err(|e| "expected bitwise_and") + } + / shift_expr() + + #[cache_left_rec] + rule shift_expr() -> Expression<'input, 'a> + = a:shift_expr() op:lit("<<") b:sum() {? + make_binary_op(a, op, b).map_err(|e| "expected shift_expr") + } + / a:shift_expr() op:lit(">>") b:sum() {? + make_binary_op(a, op, b).map_err(|e| "expected shift_expr") + } + / sum() + + #[cache_left_rec] + rule sum() -> Expression<'input, 'a> + = a:sum() op:lit("+") b:term() {? + make_binary_op(a, op, b).map_err(|e| "expected sum") + } + / a:sum() op:lit("-") b:term() {? 
+ make_binary_op(a, op, b).map_err(|e| "expected sum") + } + / term() + + #[cache_left_rec] + rule term() -> Expression<'input, 'a> + = a:term() op:lit("*") b:factor() {? + make_binary_op(a, op, b).map_err(|e| "expected term") + } + / a:term() op:lit("/") b:factor() {? + make_binary_op(a, op, b).map_err(|e| "expected term") + } + / a:term() op:lit("//") b:factor() {? + make_binary_op(a, op, b).map_err(|e| "expected term") + } + / a:term() op:lit("%") b:factor() {? + make_binary_op(a, op, b).map_err(|e| "expected term") + } + / a:term() op:lit("@") b:factor() {? + make_binary_op(a, op, b).map_err(|e| "expected term") + } + / factor() + + #[cache] + rule factor() -> Expression<'input, 'a> + = op:lit("+") a:factor() {? + make_unary_op(op, a).map_err(|e| "expected factor") + } + / op:lit("-") a:factor() {? + make_unary_op(op, a).map_err(|e| "expected factor") + } + / op:lit("~") a:factor() {? + make_unary_op(op, a).map_err(|e| "expected factor") + } + / power() + + rule power() -> Expression<'input, 'a> + = a:await_primary() op:lit("**") b:factor() {? + make_binary_op(a, op, b).map_err(|e| "expected power") + } + / await_primary() + + // Primary elements + + rule await_primary() -> Expression<'input, 'a> + = aw:tok(AWAIT, "AWAIT") e:primary() { + Expression::Await(Box::new(make_await(aw, e))) + } + / primary() + + #[cache_left_rec] + rule primary() -> Expression<'input, 'a> + = v:primary() dot:lit(".") attr:name() { + Expression::Attribute(Box::new(make_attribute(v, dot, attr))) + } + / a:primary() b:genexp() { + Expression::Call(Box::new(make_genexp_call(a, b))) + } + / f:primary() lpar:lit("(") arg:arguments()? 
rpar:lit(")") { + Expression::Call(Box::new(make_call(f, lpar, arg.unwrap_or_default(), rpar))) + } + / v:primary() lbrak:lbrak() s:slices() rbrak:rbrak() { + Expression::Subscript(Box::new(make_subscript(v, lbrak, s, rbrak))) + } + / atom() + + rule slices() -> Vec> + = s:slice() !lit(",") { vec![SubscriptElement { slice: s, comma: None }] } + / slices:separated_trailer(, ) { + make_slices(slices.0, slices.1, slices.2) + } + + rule slice() -> BaseSlice<'input, 'a> + = l:expression()? col:lit(":") u:expression()? + rest:(c:lit(":") s:expression()? {(c, s)})? { + make_slice(l, col, u, rest) + } + / e:starred_expression() { make_index_from_arg(e) } + / v:named_expression() { make_index(v) } + + rule atom() -> Expression<'input, 'a> + = n:name() { Expression::Name(Box::new(n)) } + / n:lit("True") { Expression::Name(Box::new(make_name(n))) } + / n:lit("False") { Expression::Name(Box::new(make_name(n))) } + / n:lit("None") { Expression::Name(Box::new(make_name(n))) } + / &(tok(STRING, "") / tok(FStringStart, "") / tok(TStringStart, "")) s:strings() {s.into()} + / n:tok(Number, "NUMBER") { make_number(n) } + / &lit("(") e:(tuple() / group() / (g:genexp() {Expression::GeneratorExp(Box::new(g))})) {e} + / &lit("[") e:(list() / listcomp()) {e} + / &lit("{") e:(dict() / set() / dictcomp() / setcomp()) {e} + / lit("...") { Expression::Ellipsis(Box::new(Ellipsis {lpar: vec![], rpar: vec![]}))} + + rule group() -> Expression<'input, 'a> + = lpar:lpar() e:(yield_expr() / named_expression()) rpar:rpar() { + e.with_parens(lpar, rpar) + } + + // Lambda functions + + rule lambdef() -> Expression<'input, 'a> + = kw:lit("lambda") p:lambda_params()? c:lit(":") b:expression() { + Expression::Lambda(Box::new(make_lambda(kw, p.unwrap_or_default(), c, b))) + } + + rule lambda_params() -> Parameters<'input, 'a> + = lambda_parameters() + + // lambda_parameters etc. 
duplicates parameters but without annotations or type + // comments, and if there's no comma after a parameter, we expect a colon, not a + // close parenthesis. + + rule lambda_parameters() -> Parameters<'input, 'a> + = a:lambda_slash_no_default() b:lambda_param_no_default()* + c:lambda_param_with_default()* d:lambda_star_etc()? { + make_parameters(Some(a), concat(b, c), d) + } + / a:lambda_slash_with_default() b:lambda_param_with_default()* + d:lambda_star_etc()? { + make_parameters(Some(a), b, d) + } + / a:lambda_param_no_default()+ b:lambda_param_with_default()* + d:lambda_star_etc()? { + make_parameters(None, concat(a, b), d) + } + / a:lambda_param_with_default()+ d:lambda_star_etc()? { + make_parameters(None, a, d) + } + / d:lambda_star_etc() { + make_parameters(None, vec![], Some(d)) + } + + rule lambda_slash_no_default() -> (Vec>, ParamSlash<'input, 'a>) + = a:lambda_param_no_default()+ tok:lit("/") com:comma() { + (a, ParamSlash { comma: Some(com), tok } ) + } + / a:lambda_param_no_default()+ tok:lit("/") &lit(":") { + (a, ParamSlash { comma: None, tok }) + } + + rule lambda_slash_with_default() -> (Vec>, ParamSlash<'input, 'a>) + = a:lambda_param_no_default()* b:lambda_param_with_default()+ tok:lit("/") c:comma(){ + (concat(a, b), ParamSlash { comma: Some(c), tok }) + } + / a:lambda_param_no_default()* b:lambda_param_with_default()+ tok:lit("/") &lit(":") { + (concat(a, b), ParamSlash { comma: None, tok }) + } + + rule lambda_star_etc() -> StarEtc<'input, 'a> + = star:lit("*") a:lambda_param_no_default() + b:lambda_param_maybe_default()* kw:lambda_kwds()? { + StarEtc(Some(StarArg::Param( + Box::new(add_param_star(a, star)) + )), b, kw) + } + / lit("*") c:comma() b:lambda_param_maybe_default()+ kw:lambda_kwds()? 
{ + StarEtc(Some(StarArg::Star(Box::new(ParamStar {comma: c}))), b, kw) + } + / kw:lambda_kwds() { StarEtc(None, vec![], Some(kw)) } + + rule lambda_kwds() -> Param<'input, 'a> + = star:lit("**") a:lambda_param_no_default() { + add_param_star(a, star) + } + + rule lambda_param_no_default() -> Param<'input, 'a> + = a:lambda_param() c:lit(",") { + add_param_default(a, None, Some(c)) + } + / a:lambda_param() &lit(":") {a} + + rule lambda_param_with_default() -> Param<'input, 'a> + = a:lambda_param() def:default() c:lit(",") { + add_param_default(a, Some(def), Some(c)) + } + / a:lambda_param() def:default() &lit(":") { + add_param_default(a, Some(def), None) + } + + rule lambda_param_maybe_default() -> Param<'input, 'a> + = a:lambda_param() def:default()? c:lit(",") { + add_param_default(a, def, Some(c)) + } + / a:lambda_param() def:default()? &lit(":") { + add_param_default(a, def, None) + } + + rule lambda_param() -> Param<'input, 'a> + = name:name() { Param { name, ..Default::default() } } + + // Literals + + rule strings() -> String<'input, 'a> + = s:(str:tok(STRING, "STRING") t:&_ {(make_string(str), t)} + / str:fstring() t:&_ {(String::Formatted(str), t)} / str:tstring() t:&_ {(String::Templated(str), t)})+ {? + make_strings(s) + } + + rule list() -> Expression<'input, 'a> + = lbrak:lbrak() e:star_named_expressions()? rbrak:rbrak() { + Expression::List(Box::new( + make_list(lbrak, e.unwrap_or_default(), rbrak)) + ) + } + + rule tuple() -> Expression<'input, 'a> + = lpar:lpar() first:star_named_expression() &lit(",") + rest:(c:comma() e:star_named_expression() {(c, e)})* + trailing_comma:comma()? rpar:rpar() { + Expression::Tuple(Box::new( + make_tuple(first, rest, trailing_comma, Some(lpar), Some(rpar)) + )) + } + / lpar:lpar() rpar:lit(")") { + Expression::Tuple(Box::new(Tuple::default().with_parens( + lpar, RightParen { rpar_tok: rpar } + )))} + + rule set() -> Expression<'input, 'a> + = lbrace:lbrace() e:star_named_expressions()? 
rbrace:rbrace() { + Expression::Set(Box::new(make_set(lbrace, e.unwrap_or_default(), rbrace))) + } + + // Dicts + + rule dict() -> Expression<'input, 'a> + = lbrace:lbrace() els:double_starred_keypairs()? rbrace:rbrace() { + Expression::Dict(Box::new(make_dict(lbrace, els.unwrap_or_default(), rbrace))) + } + + + rule double_starred_keypairs() -> Vec> + = pairs:separated_trailer(, ) { + make_double_starred_keypairs(pairs.0, pairs.1, pairs.2) + } + + rule double_starred_kvpair() -> DictElement<'input, 'a> + = s:lit("**") e:bitwise_or() { + DictElement::Starred(make_double_starred_element(s, e)) + } + / k:kvpair() { make_dict_element(k) } + + rule kvpair() -> (Expression<'input, 'a>, TokenRef<'input, 'a>, Expression<'input, 'a>) + = k:expression() colon:lit(":") v:expression() { (k, colon, v) } + + // Comprehensions & generators + + rule for_if_clauses() -> CompFor<'input, 'a> + = c:for_if_clause()+ {? merge_comp_fors(c) } + + rule for_if_clause() -> CompFor<'input, 'a> + = asy:_async() f:lit("for") tgt:star_targets() i:lit("in") + iter:disjunction() ifs:_comp_if()* { + make_for_if(Some(asy), f, tgt, i, iter, ifs) + } + / f:lit("for") tgt:star_targets() i:lit("in") + iter:disjunction() ifs:_comp_if()* { + make_for_if(None, f, tgt, i, iter, ifs) + } + + rule _comp_if() -> CompIf<'input, 'a> + = kw:lit("if") cond:disjunction() { + make_comp_if(kw, cond) + } + + rule listcomp() -> Expression<'input, 'a> + = lbrak:lbrak() elt:named_expression() comp:for_if_clauses() rbrak:rbrak() { + Expression::ListComp(Box::new(make_list_comp(lbrak, elt, comp, rbrak))) + } + + rule setcomp() -> Expression<'input, 'a> + = l:lbrace() elt:named_expression() comp:for_if_clauses() r:rbrace() { + Expression::SetComp(Box::new(make_set_comp(l, elt, comp, r))) + } + + rule genexp() -> GeneratorExp<'input, 'a> + = lpar:lpar() g:_bare_genexp() rpar:rpar() { + g.with_parens(lpar, rpar) + } + + rule _bare_genexp() -> GeneratorExp<'input, 'a> + = elt:named_expression() comp:for_if_clauses() { + 
make_bare_genexp(elt, comp) + } + + rule dictcomp() -> Expression<'input, 'a> + = lbrace:lbrace() elt:kvpair() comp:for_if_clauses() rbrace:rbrace() { + Expression::DictComp(Box::new(make_dict_comp(lbrace, elt, comp, rbrace))) + } + + // Function call arguments + + rule arguments() -> Vec> + = a:args() trail:comma()? &lit(")") {add_arguments_trailing_comma(a, trail)} + + rule args() -> Vec> + = first:_posarg() + rest:(c:comma() a:_posarg() {(c, a)})* + kw:(c:comma() k:kwargs() {(c, k)})? { + let (trail, kw) = kw.map(|(x,y)| (Some(x), Some(y))).unwrap_or((None, None)); + concat( + comma_separate(first, rest, trail), + kw.unwrap_or_default(), + ) + } + / kwargs() + + rule _posarg() -> Arg<'input, 'a> + = a:(starred_expression() / e:named_expression() { make_arg(e) }) + !lit("=") { a } + + rule kwargs() -> Vec> + = sitems:separated(, ) + scomma:comma() + ditems:separated(, ) { + concat( + comma_separate(sitems.0, sitems.1, Some(scomma)), + comma_separate(ditems.0, ditems.1, None), + ) + } + / items:separated(, ) { + comma_separate(items.0, items.1, None) + } + / items:separated(, ) { + comma_separate(items.0, items.1, None) + } + + rule starred_expression() -> Arg<'input, 'a> + = star:lit("*") e:expression() { make_star_arg(star, e) } + + rule kwarg_or_starred() -> Arg<'input, 'a> + = _kwarg() + / starred_expression() + + rule kwarg_or_double_starred() -> Arg<'input, 'a> + = _kwarg() + / star:lit("**") e:expression() { make_star_arg(star, e) } + + rule _kwarg() -> Arg<'input, 'a> + = n:name() eq:lit("=") v:expression() { + make_kwarg(n, eq, v) + } + + // Assignment targets + // Generic targets + + rule star_targets() -> AssignTargetExpression<'input, 'a> + = a:star_target() !lit(",") {a} + / targets:separated_trailer(, ) { + AssignTargetExpression::Tuple(Box::new( + make_tuple(targets.0, targets.1, targets.2, None, None) + )) + } + + rule star_targets_list_seq() -> Vec> + = targets:separated_trailer(, ) { + comma_separate(targets.0, targets.1, targets.2) + } + + // 
This differs from star_targets below because it requires at least two items + // in the tuple + rule star_targets_tuple_seq() -> Tuple<'input, 'a> + = first:(t:star_target() {assign_target_to_element(t)}) + rest:(c:comma() t:star_target() {(c, assign_target_to_element(t))})+ + trail:comma()? { + make_tuple(first, rest, trail, None, None) + } + / t:star_target() trail:comma()? { + make_tuple(assign_target_to_element(t), vec![], trail, None, None) + } + + #[cache] + rule star_target() -> AssignTargetExpression<'input, 'a> + = star:lit("*") !lit("*") t:star_target() { + AssignTargetExpression::StarredElement(Box::new( + make_starred_element(star, assign_target_to_element(t)) + )) + } + / target_with_star_atom() + + #[cache] + rule target_with_star_atom() -> AssignTargetExpression<'input, 'a> + = a:t_primary() dot:lit(".") n:name() !t_lookahead() { + AssignTargetExpression::Attribute(Box::new(make_attribute(a, dot, n))) + } + / a:t_primary() lbrak:lbrak() s:slices() rbrak:rbrak() !t_lookahead() { + AssignTargetExpression::Subscript(Box::new( + make_subscript(a, lbrak, s, rbrak) + )) + } + / a:star_atom() {a} + + rule star_atom() -> AssignTargetExpression<'input, 'a> + = a:name() { AssignTargetExpression::Name(Box::new(a)) } + / lpar:lpar() a:target_with_star_atom() rpar:rpar() { a.with_parens(lpar, rpar) } + / lpar:lpar() a:star_targets_tuple_seq()? rpar:rpar() { + AssignTargetExpression::Tuple(Box::new( + a.unwrap_or_default().with_parens(lpar, rpar) + )) + } + / lbrak:lbrak() a:star_targets_list_seq()? 
rbrak:rbrak() { + AssignTargetExpression::List(Box::new( + make_list(lbrak, a.unwrap_or_default(), rbrak) + )) + } + + rule single_target() -> AssignTargetExpression<'input, 'a> + = single_subscript_attribute_target() + / n:name() { AssignTargetExpression::Name(Box::new(n)) } + / lpar:lpar() t:single_target() rpar:rpar() { t.with_parens(lpar, rpar) } + + rule single_subscript_attribute_target() -> AssignTargetExpression<'input, 'a> + = a:t_primary() dot:lit(".") n:name() !t_lookahead() { + AssignTargetExpression::Attribute(Box::new(make_attribute(a, dot, n))) + } + / a:t_primary() lbrak:lbrak() s:slices() rbrak:rbrak() !t_lookahead() { + AssignTargetExpression::Subscript(Box::new( + make_subscript(a, lbrak, s, rbrak) + )) + } + + + #[cache_left_rec] + rule t_primary() -> Expression<'input, 'a> + = value:t_primary() dot:lit(".") attr:name() &t_lookahead() { + Expression::Attribute(Box::new(make_attribute(value, dot, attr))) + } + / v:t_primary() l:lbrak() s:slices() r:rbrak() &t_lookahead() { + Expression::Subscript(Box::new(make_subscript(v, l, s, r))) + } + / f:t_primary() gen:genexp() &t_lookahead() { + Expression::Call(Box::new(make_genexp_call(f, gen))) + } + / f:t_primary() lpar:lit("(") arg:arguments()? 
rpar:lit(")") &t_lookahead() { + Expression::Call(Box::new(make_call(f, lpar, arg.unwrap_or_default(), rpar))) + } + / a:atom() &t_lookahead() {a} + + rule t_lookahead() -> () + = (lit("(") / lit("[") / lit(".")) {} + + // Targets for del statements + + rule del_targets() -> Vec> + = t:separated_trailer(, ) { + comma_separate(t.0, t.1, t.2) + } + + rule del_target() -> DelTargetExpression<'input, 'a> + = a:t_primary() d:lit(".") n:name() !t_lookahead() { + DelTargetExpression::Attribute(Box::new(make_attribute(a, d, n))) + } + / a:t_primary() lbrak:lbrak() s:slices() rbrak:rbrak() !t_lookahead() { + DelTargetExpression::Subscript(Box::new( + make_subscript(a, lbrak, s, rbrak) + )) + } + / del_t_atom() + + rule del_t_atom() -> DelTargetExpression<'input, 'a> + = n:name() { DelTargetExpression::Name(Box::new(n)) } + / l:lpar() d:del_target() r:rpar() { d.with_parens(l, r) } + / l:lpar() d:del_targets()? r:rpar() { + make_del_tuple(Some(l), d.unwrap_or_default(), Some(r)) + } + / l:lbrak() d:del_targets()? r:rbrak() { + DelTargetExpression::List(Box::new( + make_list(l, d.unwrap_or_default(), r) + )) + } + + // F-strings + + rule fstring() -> FormattedString<'input, 'a> + = start:tok(FStringStart, "f\"") + parts:(_f_string() / _f_replacement())* + end:tok(FStringEnd, "\"") { + make_fstring(start.string, parts, end.string) + } + + rule _f_string() -> FormattedStringContent<'input, 'a> + = t:tok(FStringString, "f-string contents") { + FormattedStringContent::Text(make_fstringtext(t.string)) + } + + rule _f_replacement() -> FormattedStringContent<'input, 'a> + = lb:lit("{") e:_f_expr() eq:lit("=")? + conv:(t:lit("!") c:_f_conversion() {(t,c)})? + spec:(t:lit(":") s:_f_spec() {(t,s)})? 
+ rb:lit("}") { + FormattedStringContent::Expression(Box::new( + make_fstring_expression(lb, e, eq, conv, spec, rb) + )) + } + + rule _f_expr() -> Expression<'input, 'a> + = (g:_bare_genexp() {Expression::GeneratorExp(Box::new(g))}) + / star_expressions() + / yield_expr() + + rule _f_conversion() -> &'a str + = lit("r") {"r"} / lit("s") {"s"} / lit("a") {"a"} + + rule _f_spec() -> Vec> + = (_f_string() / _f_replacement())* + + // T-strings + + rule tstring() -> TemplatedString<'input, 'a> + = start:tok(TStringStart, "t\"") + parts:(_t_string() / _t_replacement())* + end:tok(TStringEnd, "\"") { + make_tstring(start.string, parts, end.string) + } + + rule _t_string() -> TemplatedStringContent<'input, 'a> + = t:tok(TStringString, "t-string contents") { + TemplatedStringContent::Text(make_tstringtext(t.string)) + } + + + rule _t_replacement() -> TemplatedStringContent<'input, 'a> + = lb:lit("{") e:annotated_rhs() eq:lit("=")? + conv:(t:lit("!") c:_f_conversion() {(t,c)})? + spec:(t:lit(":") s:_t_spec() {(t,s)})? 
+ rb:lit("}") { + TemplatedStringContent::Expression(Box::new( + make_tstring_expression(lb, e, eq, conv, spec, rb) + )) + } + + rule _t_spec() -> Vec> + = (_t_string() / _t_replacement())* + + // CST helpers + + rule comma() -> Comma<'input, 'a> + = c:lit(",") { make_comma(c) } + + rule dots() -> Vec> + = ds:((dot:lit(".") { make_dot(dot) })+ + / tok:lit("...") { + vec![make_dot(tok), make_dot(tok), make_dot(tok)]} + )+ { ds.into_iter().flatten().collect() } + + rule lpar() -> LeftParen<'input, 'a> + = a:lit("(") { make_lpar(a) } + + rule rpar() -> RightParen<'input, 'a> + = a:lit(")") { make_rpar(a) } + + rule lbrak() -> LeftSquareBracket<'input, 'a> + = tok:lit("[") { make_left_bracket(tok) } + + rule rbrak() -> RightSquareBracket<'input, 'a> + = tok:lit("]") { make_right_bracket(tok) } + + rule lbrace() -> LeftCurlyBrace<'input, 'a> + = tok:lit("{") { make_left_brace(tok) } + + rule rbrace() -> RightCurlyBrace<'input, 'a> + = tok:lit("}") { make_right_brace(tok) } + + /// matches any token, not just whitespace + rule _() -> TokenRef<'input, 'a> + = [t] { t } + + rule lit(lit: &'static str) -> TokenRef<'input, 'a> + = [t] {? if t.string == lit { Ok(t) } else { Err(lit) } } + + rule tok(tok: TokType, err: &'static str) -> TokenRef<'input, 'a> + = [t] {? 
if t.r#type == tok { Ok(t) } else { Err(err) } } + + rule name() -> Name<'input, 'a> + = !( lit("False") / lit("None") / lit("True") / lit("and") / lit("as") / lit("assert") / lit("async") / lit("await") + / lit("break") / lit("class") / lit("continue") / lit("def") / lit("del") / lit("elif") / lit("else") + / lit("except") / lit("finally") / lit("for") / lit("from") / lit("global") / lit("if") / lit("import") + / lit("in") / lit("is") / lit("lambda") / lit("nonlocal") / lit("not") / lit("or") / lit("pass") / lit("raise") + / lit("return") / lit("try") / lit("while") / lit("with") / lit("yield") + ) + t:tok(NameTok, "NAME") {make_name(t)} + + rule _async() -> TokenRef<'input, 'a> + = tok(Async, "ASYNC") + + rule separated_trailer(el: rule, sep: rule) -> (El, Vec<(Sep, El)>, Option) + = e:el() rest:(s:sep() e:el() {(s, e)})* trailer:sep()? {(e, rest, trailer)} + + rule separated(el: rule, sep: rule) -> (El, Vec<(Sep, El)>) + = e:el() rest:(s:sep() e:el() {(s, e)})* {(e, rest)} + + rule traced(e: rule) -> T = + &(_* { + #[cfg(feature = "trace")] + { + println!("[PEG_INPUT_START]"); + println!("{}", input); + println!("[PEG_TRACE_START]"); + } + }) + e:e()? {? 
+ #[cfg(feature = "trace")] + println!("[PEG_TRACE_STOP]"); + e.ok_or("") + } + + } +} + +#[allow(clippy::too_many_arguments)] +fn make_function_def<'input, 'a>( + async_tok: Option>, + def_tok: TokenRef<'input, 'a>, + name: Name<'input, 'a>, + type_parameters: Option>, + open_paren_tok: TokenRef<'input, 'a>, + params: Option>, + close_paren_tok: TokenRef<'input, 'a>, + returns: Option>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> FunctionDef<'input, 'a> { + let asynchronous = async_tok.as_ref().map(|_| make_async()); + FunctionDef { + name, + type_parameters, + params: params.unwrap_or_default(), + body, + decorators: Default::default(), + returns, + asynchronous, + async_tok, + def_tok, + open_paren_tok, + close_paren_tok, + colon_tok, + } +} + +fn make_decorator<'input, 'a>( + at_tok: TokenRef<'input, 'a>, + name: Expression<'input, 'a>, + newline_tok: TokenRef<'input, 'a>, +) -> Decorator<'input, 'a> { + Decorator { + decorator: name, + newline_tok, + at_tok, + } +} + +fn make_comparison<'input, 'a>( + head: Expression<'input, 'a>, + tail: Vec<(CompOp<'input, 'a>, Expression<'input, 'a>)>, +) -> Expression<'input, 'a> { + let mut comparisons = vec![]; + for (operator, e) in tail { + comparisons.push(ComparisonTarget { + operator, + comparator: e, + }); + } + Expression::Comparison(Box::new(Comparison { + left: Box::new(head), + comparisons, + lpar: vec![], + rpar: vec![], + })) +} + +fn make_comparison_operator<'input, 'a>( + tok: TokenRef<'input, 'a>, +) -> Result<'a, CompOp<'input, 'a>> { + match tok.string { + "<" => Ok(CompOp::LessThan { tok }), + ">" => Ok(CompOp::GreaterThan { tok }), + "<=" => Ok(CompOp::LessThanEqual { tok }), + ">=" => Ok(CompOp::GreaterThanEqual { tok }), + "==" => Ok(CompOp::Equal { tok }), + "!=" => Ok(CompOp::NotEqual { tok }), + "in" => Ok(CompOp::In { tok }), + "is" => Ok(CompOp::Is { tok }), + _ => Err(ParserError::OperatorError), + } +} + +fn make_comparison_operator_2<'input, 'a>( + first: 
TokenRef<'input, 'a>, + second: TokenRef<'input, 'a>, +) -> Result<'a, CompOp<'input, 'a>> { + match (first.string, second.string) { + ("is", "not") => Ok(CompOp::IsNot { + is_tok: first, + not_tok: second, + }), + ("not", "in") => Ok(CompOp::NotIn { + not_tok: first, + in_tok: second, + }), + _ => Err(ParserError::OperatorError), + } +} + +fn make_boolean_op<'input, 'a>( + head: Expression<'input, 'a>, + tail: Vec<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, +) -> Result<'a, Expression<'input, 'a>> { + if tail.is_empty() { + return Ok(head); + } + + let mut expr = head; + for (tok, right) in tail { + expr = Expression::BooleanOperation(Box::new(BooleanOperation { + left: Box::new(expr), + operator: make_boolean_operator(tok)?, + right: Box::new(right), + lpar: vec![], + rpar: vec![], + })) + } + Ok(expr) +} + +fn make_boolean_operator<'input, 'a>( + tok: TokenRef<'input, 'a>, +) -> Result<'a, BooleanOp<'input, 'a>> { + match tok.string { + "and" => Ok(BooleanOp::And { tok }), + "or" => Ok(BooleanOp::Or { tok }), + _ => Err(ParserError::OperatorError), + } +} + +fn make_binary_op<'input, 'a>( + left: Expression<'input, 'a>, + op: TokenRef<'input, 'a>, + right: Expression<'input, 'a>, +) -> Result<'a, Expression<'input, 'a>> { + let operator = make_binary_operator(op)?; + Ok(Expression::BinaryOperation(Box::new(BinaryOperation { + left: Box::new(left), + operator, + right: Box::new(right), + lpar: vec![], + rpar: vec![], + }))) +} + +fn make_binary_operator<'input, 'a>(tok: TokenRef<'input, 'a>) -> Result<'a, BinaryOp<'input, 'a>> { + match tok.string { + "+" => Ok(BinaryOp::Add { tok }), + "-" => Ok(BinaryOp::Subtract { tok }), + "*" => Ok(BinaryOp::Multiply { tok }), + "/" => Ok(BinaryOp::Divide { tok }), + "//" => Ok(BinaryOp::FloorDivide { tok }), + "%" => Ok(BinaryOp::Modulo { tok }), + "**" => Ok(BinaryOp::Power { tok }), + "<<" => Ok(BinaryOp::LeftShift { tok }), + ">>" => Ok(BinaryOp::RightShift { tok }), + "|" => Ok(BinaryOp::BitOr { tok }), + "&" => 
Ok(BinaryOp::BitAnd { tok }), + "^" => Ok(BinaryOp::BitXor { tok }), + "@" => Ok(BinaryOp::MatrixMultiply { tok }), + _ => Err(ParserError::OperatorError), + } +} + +fn make_unary_op<'input, 'a>( + op: TokenRef<'input, 'a>, + tail: Expression<'input, 'a>, +) -> Result<'a, Expression<'input, 'a>> { + let operator = make_unary_operator(op)?; + Ok(Expression::UnaryOperation(Box::new(UnaryOperation { + operator, + expression: Box::new(tail), + lpar: vec![], + rpar: vec![], + }))) +} + +fn make_unary_operator<'input, 'a>(tok: TokenRef<'input, 'a>) -> Result<'a, UnaryOp<'input, 'a>> { + match tok.string { + "+" => Ok(UnaryOp::Plus { tok }), + "-" => Ok(UnaryOp::Minus { tok }), + "~" => Ok(UnaryOp::BitInvert { tok }), + "not" => Ok(UnaryOp::Not { tok }), + _ => Err(ParserError::OperatorError), + } +} + +fn make_number<'input, 'a>(num: TokenRef<'input, 'a>) -> Expression<'input, 'a> { + super::numbers::parse_number(num.string) +} + +fn make_indented_block<'input, 'a>( + nl: TokenRef<'input, 'a>, + indent: TokenRef<'input, 'a>, + statements: Vec>, + dedent: TokenRef<'input, 'a>, +) -> Suite<'input, 'a> { + Suite::IndentedBlock(IndentedBlock { + body: statements, + indent: Default::default(), + newline_tok: nl, + indent_tok: indent, + dedent_tok: dedent, + }) +} + +struct SimpleStatementParts<'input, 'a> { + first_tok: TokenRef<'input, 'a>, // The first token of the first statement. 
Used for its whitespace + first_statement: SmallStatement<'input, 'a>, + rest: Vec<(TokenRef<'input, 'a>, SmallStatement<'input, 'a>)>, // semicolon, statement pairs + last_semi: Option>, + nl: TokenRef<'input, 'a>, +} + +fn make_semicolon<'input, 'a>(tok: TokenRef<'input, 'a>) -> Semicolon<'input, 'a> { + Semicolon { tok } +} + +fn _make_simple_statement<'input, 'a>( + parts: SimpleStatementParts<'input, 'a>, +) -> ( + TokenRef<'input, 'a>, + Vec>, + TokenRef<'input, 'a>, +) { + let mut body = vec![]; + + let mut current = parts.first_statement; + for (semi, next) in parts.rest { + body.push(current.with_semicolon(Some(make_semicolon(semi)))); + current = next; + } + if let Some(semi) = parts.last_semi { + current = current.with_semicolon(Some(make_semicolon(semi))); + } + body.push(current); + + (parts.first_tok, body, parts.nl) +} + +fn make_simple_statement_suite<'input, 'a>( + parts: SimpleStatementParts<'input, 'a>, +) -> Suite<'input, 'a> { + let (first_tok, body, newline_tok) = _make_simple_statement(parts); + + Suite::SimpleStatementSuite(SimpleStatementSuite { + body, + first_tok, + newline_tok, + }) +} + +fn make_simple_statement_line<'input, 'a>( + parts: SimpleStatementParts<'input, 'a>, +) -> SimpleStatementLine<'input, 'a> { + let (first_tok, body, newline_tok) = _make_simple_statement(parts); + SimpleStatementLine { + body, + first_tok, + newline_tok, + } +} + +fn make_if<'input, 'a>( + if_tok: TokenRef<'input, 'a>, + cond: Expression<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + block: Suite<'input, 'a>, + orelse: Option>, + is_elif: bool, +) -> If<'input, 'a> { + If { + test: cond, + body: block, + orelse: orelse.map(Box::new), + is_elif, + if_tok, + colon_tok, + } +} + +fn make_else<'input, 'a>( + else_tok: TokenRef<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + block: Suite<'input, 'a>, +) -> Else<'input, 'a> { + Else { + body: block, + else_tok, + colon_tok, + } +} + +struct StarEtc<'input, 'a>( + Option>, + Vec>, + Option>, +); + +fn 
make_parameters<'input, 'a>( + posonly: Option<(Vec>, ParamSlash<'input, 'a>)>, + params: Vec>, + star_etc: Option>, +) -> Parameters<'input, 'a> { + let (posonly_params, posonly_ind) = match posonly { + Some((a, b)) => (a, Some(b)), + None => (vec![], None), + }; + let (star_arg, kwonly_params, star_kwarg) = match star_etc { + None => (None, vec![], None), + Some(StarEtc(a, b, c)) => (a, b, c), + }; + Parameters { + params, + star_arg, + kwonly_params, + star_kwarg, + posonly_params, + posonly_ind, + } +} + +fn add_param_default<'input, 'a>( + param: Param<'input, 'a>, + def: Option<(AssignEqual<'input, 'a>, Expression<'input, 'a>)>, + comma_tok: Option>, +) -> Param<'input, 'a> { + let comma = comma_tok.map(make_comma); + + let (equal, default) = match def { + Some((a, b)) => (Some(a), Some(b)), + None => (None, None), + }; + Param { + equal, + default, + comma, + ..param + } +} + +fn add_param_star<'input, 'a>( + param: Param<'input, 'a>, + star: TokenRef<'input, 'a>, +) -> Param<'input, 'a> { + let str = star.string; + Param { + star: Some(str), + star_tok: Some(star), + ..param + } +} + +fn make_assign_equal<'input, 'a>(tok: TokenRef<'input, 'a>) -> AssignEqual<'input, 'a> { + AssignEqual { tok } +} + +fn make_comma<'input, 'a>(tok: TokenRef<'input, 'a>) -> Comma<'input, 'a> { + Comma { tok } +} + +fn concat(a: Vec, b: Vec) -> Vec { + a.into_iter().chain(b.into_iter()).collect() +} + +fn make_name_or_attr<'input, 'a>( + first_tok: Name<'input, 'a>, + mut tail: Vec<(TokenRef<'input, 'a>, Name<'input, 'a>)>, +) -> NameOrAttribute<'input, 'a> { + if let Some((dot, name)) = tail.pop() { + let dot = make_dot(dot); + return NameOrAttribute::A(Box::new(Attribute { + attr: name, + dot, + lpar: Default::default(), + rpar: Default::default(), + value: Box::new(make_name_or_attr(first_tok, tail).into()), + })); + } else { + NameOrAttribute::N(Box::new(first_tok)) + } +} + +fn make_name<'input, 'a>(tok: TokenRef<'input, 'a>) -> Name<'input, 'a> { + Name { + value: 
tok.string, + ..Default::default() + } +} + +fn make_dot<'input, 'a>(tok: TokenRef<'input, 'a>) -> Dot<'input, 'a> { + Dot { tok } +} + +fn make_import_alias<'input, 'a>( + name: NameOrAttribute<'input, 'a>, + asname: Option<(TokenRef<'input, 'a>, Name<'input, 'a>)>, +) -> ImportAlias<'input, 'a> { + ImportAlias { + name, + asname: asname.map(|(x, y)| make_as_name(x, AssignTargetExpression::Name(Box::new(y)))), + comma: None, + } +} + +fn make_as_name<'input, 'a>( + as_tok: TokenRef<'input, 'a>, + name: AssignTargetExpression<'input, 'a>, +) -> AsName<'input, 'a> { + AsName { name, as_tok } +} + +type ParenthesizedImportNames<'input, 'a> = ( + Option>, + ImportNames<'input, 'a>, + Option>, +); + +fn make_import_from<'input, 'a>( + from_tok: TokenRef<'input, 'a>, + dots: Vec>, + module: Option>, + import_tok: TokenRef<'input, 'a>, + aliases: ParenthesizedImportNames<'input, 'a>, +) -> ImportFrom<'input, 'a> { + let (lpar, names, rpar) = aliases; + + ImportFrom { + module, + names, + relative: dots, + lpar, + rpar, + semicolon: None, + from_tok, + import_tok, + } +} + +fn make_import<'input, 'a>( + import_tok: TokenRef<'input, 'a>, + names: Vec>, +) -> Import<'input, 'a> { + Import { + names, + semicolon: None, + import_tok, + } +} + +fn make_import_from_as_names<'input, 'a>( + first: ImportAlias<'input, 'a>, + tail: Vec<(Comma<'input, 'a>, ImportAlias<'input, 'a>)>, +) -> Vec> { + let mut ret = vec![]; + let mut cur = first; + for (comma, alias) in tail { + ret.push(cur.with_comma(comma)); + cur = alias; + } + ret.push(cur); + ret +} + +fn make_lpar<'input, 'a>(tok: TokenRef<'input, 'a>) -> LeftParen<'input, 'a> { + LeftParen { lpar_tok: tok } +} + +fn make_rpar<'input, 'a>(tok: TokenRef<'input, 'a>) -> RightParen<'input, 'a> { + RightParen { rpar_tok: tok } +} + +fn make_module<'input, 'a>( + body: Vec>, + tok: TokenRef<'input, 'a>, + encoding: &str, +) -> Module<'input, 'a> { + Module { + body, + eof_tok: tok, + default_indent: " ", + default_newline: "\n", + 
has_trailing_newline: false, + encoding: encoding.to_string(), + } +} + +fn make_attribute<'input, 'a>( + value: Expression<'input, 'a>, + dot: TokenRef<'input, 'a>, + attr: Name<'input, 'a>, +) -> Attribute<'input, 'a> { + let dot = make_dot(dot); + Attribute { + attr, + dot, + lpar: Default::default(), + rpar: Default::default(), + value: Box::new(value), + } +} + +fn make_starred_element<'input, 'a>( + star_tok: TokenRef<'input, 'a>, + rest: Element<'input, 'a>, +) -> StarredElement<'input, 'a> { + let value = match rest { + Element::Simple { value, .. } => value, + _ => panic!("Internal error while making starred element"), + }; + StarredElement { + value: Box::new(value), + lpar: Default::default(), + rpar: Default::default(), + comma: Default::default(), + star_tok, + } +} + +fn assign_target_to_element<'input, 'a>( + expr: AssignTargetExpression<'input, 'a>, +) -> Element<'input, 'a> { + match expr { + AssignTargetExpression::Attribute(a) => Element::Simple { + value: Expression::Attribute(a), + comma: Default::default(), + }, + AssignTargetExpression::Name(a) => Element::Simple { + value: Expression::Name(a), + comma: Default::default(), + }, + AssignTargetExpression::Tuple(a) => Element::Simple { + value: Expression::Tuple(a), + comma: Default::default(), + }, + AssignTargetExpression::StarredElement(s) => Element::Starred(s), + AssignTargetExpression::List(l) => Element::Simple { + value: Expression::List(l), + comma: Default::default(), + }, + AssignTargetExpression::Subscript(s) => Element::Simple { + value: Expression::Subscript(s), + comma: Default::default(), + }, + } +} + +fn make_assignment<'input, 'a>( + lhs: Vec<(AssignTargetExpression<'input, 'a>, TokenRef<'input, 'a>)>, + rhs: Expression<'input, 'a>, +) -> Assign<'input, 'a> { + let mut targets = vec![]; + for (target, equal_tok) in lhs { + targets.push(AssignTarget { target, equal_tok }); + } + Assign { + targets, + value: rhs, + semicolon: Default::default(), + } +} + +fn 
expr_to_element<'input, 'a>(expr: Expression<'input, 'a>) -> Element<'input, 'a> { + match expr { + Expression::StarredElement(inner_expr) => Element::Starred(inner_expr), + _ => Element::Simple { + value: expr, + comma: Default::default(), + }, + } +} + +fn make_tuple<'input, 'a>( + first: Element<'input, 'a>, + rest: Vec<(Comma<'input, 'a>, Element<'input, 'a>)>, + trailing_comma: Option>, + lpar: Option>, + rpar: Option>, +) -> Tuple<'input, 'a> { + let elements = comma_separate(first, rest, trailing_comma); + + let lpar = lpar.map(|l| vec![l]).unwrap_or_default(); + let rpar = rpar.map(|r| vec![r]).unwrap_or_default(); + + Tuple { + elements, + lpar, + rpar, + } +} + +fn make_tuple_from_elements<'input, 'a>( + first: Element<'input, 'a>, + mut rest: Vec>, +) -> Tuple<'input, 'a> { + rest.insert(0, first); + Tuple { + elements: rest, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_kwarg<'input, 'a>( + name: Name<'input, 'a>, + eq: TokenRef<'input, 'a>, + value: Expression<'input, 'a>, +) -> Arg<'input, 'a> { + let equal = Some(make_assign_equal(eq)); + let keyword = Some(name); + Arg { + value, + keyword, + equal, + comma: None, + star: "", + star_tok: None, + } +} + +fn make_star_arg<'input, 'a>( + star: TokenRef<'input, 'a>, + expr: Expression<'input, 'a>, +) -> Arg<'input, 'a> { + let str = star.string; + Arg { + value: expr, + keyword: None, + equal: None, + comma: None, + star: str, + star_tok: Some(star), + } +} + +fn make_call<'input, 'a>( + func: Expression<'input, 'a>, + lpar_tok: TokenRef<'input, 'a>, + args: Vec>, + rpar_tok: TokenRef<'input, 'a>, +) -> Call<'input, 'a> { + let lpar = vec![]; + let rpar = vec![]; + let func = Box::new(func); + + Call { + func, + args, + lpar, + rpar, + lpar_tok, + rpar_tok, + } +} + +fn make_genexp_call<'input, 'a>( + func: Expression<'input, 'a>, + mut genexp: GeneratorExp<'input, 'a>, +) -> Call<'input, 'a> { + // func ( (genexp) ) + // ^ + // lpar_tok + + // lpar_tok is the same token that 
was used to parse genexp's first lpar. + // Nothing owns the whitespace before lpar_tok, so the same token is passed in here + // again, to be converted into whitespace_after_func. We then split off a pair of + // parenthesis from genexp, since now Call will own them. + + let mut lpars = genexp.lpar.into_iter(); + let lpar_tok = lpars.next().expect("genexp without lpar").lpar_tok; + genexp.lpar = lpars.collect(); + let rpar_tok = genexp.rpar.pop().expect("genexp without rpar").rpar_tok; + + Call { + func: Box::new(func), + args: vec![Arg { + value: Expression::GeneratorExp(Box::new(genexp)), + keyword: None, + equal: None, + comma: None, + star: "", + star_tok: None, + }], + lpar: vec![], + rpar: vec![], + lpar_tok, + rpar_tok, + } +} + +fn make_arg<'input, 'a>(expr: Expression<'input, 'a>) -> Arg<'input, 'a> { + Arg { + value: expr, + keyword: Default::default(), + equal: Default::default(), + comma: Default::default(), + star: Default::default(), + star_tok: None, + } +} + +fn make_comp_if<'input, 'a>( + if_tok: TokenRef<'input, 'a>, + test: Expression<'input, 'a>, +) -> CompIf<'input, 'a> { + CompIf { test, if_tok } +} + +fn make_for_if<'input, 'a>( + async_tok: Option>, + for_tok: TokenRef<'input, 'a>, + target: AssignTargetExpression<'input, 'a>, + in_tok: TokenRef<'input, 'a>, + iter: Expression<'input, 'a>, + ifs: Vec>, +) -> CompFor<'input, 'a> { + let inner_for_in = None; + let asynchronous = async_tok.as_ref().map(|_| make_async()); + + CompFor { + target, + iter, + ifs, + inner_for_in, + asynchronous, + async_tok, + for_tok, + in_tok, + } +} + +fn make_bare_genexp<'input, 'a>( + elt: Expression<'input, 'a>, + for_in: CompFor<'input, 'a>, +) -> GeneratorExp<'input, 'a> { + GeneratorExp { + elt: Box::new(elt), + for_in: Box::new(for_in), + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn merge_comp_fors<'input, 'a>( + comp_fors: Vec>, +) -> GrammarResult> { + if comp_fors.len() > MAX_RECURSION_DEPTH { + return Err("shallower 
comprehension"); + } + let mut it = comp_fors.into_iter().rev(); + let first = it.next().expect("cant merge empty comp_fors"); + + Ok(it.fold(first, |acc, curr| CompFor { + inner_for_in: Some(Box::new(acc)), + ..curr + })) +} + +fn make_left_bracket<'input, 'a>(tok: TokenRef<'input, 'a>) -> LeftSquareBracket<'input, 'a> { + LeftSquareBracket { tok } +} + +fn make_right_bracket<'input, 'a>(tok: TokenRef<'input, 'a>) -> RightSquareBracket<'input, 'a> { + RightSquareBracket { tok } +} + +fn make_left_brace<'input, 'a>(tok: TokenRef<'input, 'a>) -> LeftCurlyBrace<'input, 'a> { + LeftCurlyBrace { tok } +} + +fn make_right_brace<'input, 'a>(tok: TokenRef<'input, 'a>) -> RightCurlyBrace<'input, 'a> { + RightCurlyBrace { tok } +} + +fn make_list_comp<'input, 'a>( + lbracket: LeftSquareBracket<'input, 'a>, + elt: Expression<'input, 'a>, + for_in: CompFor<'input, 'a>, + rbracket: RightSquareBracket<'input, 'a>, +) -> ListComp<'input, 'a> { + ListComp { + elt: Box::new(elt), + for_in: Box::new(for_in), + lbracket, + rbracket, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_set_comp<'input, 'a>( + lbrace: LeftCurlyBrace<'input, 'a>, + elt: Expression<'input, 'a>, + for_in: CompFor<'input, 'a>, + rbrace: RightCurlyBrace<'input, 'a>, +) -> SetComp<'input, 'a> { + SetComp { + elt: Box::new(elt), + for_in: Box::new(for_in), + lbrace, + rbrace, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_dict_comp<'input, 'a>( + lbrace: LeftCurlyBrace<'input, 'a>, + kvpair: ( + Expression<'input, 'a>, + TokenRef<'input, 'a>, + Expression<'input, 'a>, + ), + for_in: CompFor<'input, 'a>, + rbrace: RightCurlyBrace<'input, 'a>, +) -> DictComp<'input, 'a> { + let (key, colon_tok, value) = kvpair; + + DictComp { + key: Box::new(key), + value: Box::new(value), + for_in: Box::new(for_in), + lbrace, + rbrace, + lpar: vec![], + rpar: vec![], + colon_tok, + } +} + +fn make_list<'input, 'a>( + lbracket: LeftSquareBracket<'input, 'a>, + elements: Vec>, + 
rbracket: RightSquareBracket<'input, 'a>, +) -> List<'input, 'a> { + List { + elements, + lbracket, + rbracket, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_set<'input, 'a>( + lbrace: LeftCurlyBrace<'input, 'a>, + elements: Vec>, + rbrace: RightCurlyBrace<'input, 'a>, +) -> Set<'input, 'a> { + Set { + elements, + lbrace, + rbrace, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn comma_separate<'input, 'a, T>( + first: T, + rest: Vec<(Comma<'input, 'a>, T)>, + trailing_comma: Option>, +) -> Vec +where + T: WithComma<'input, 'a>, +{ + let mut elements = vec![]; + let mut current = first; + for (comma, next) in rest { + elements.push(current.with_comma(comma)); + current = next; + } + if let Some(comma) = trailing_comma { + current = current.with_comma(comma); + } + elements.push(current); + elements +} + +fn make_dict<'input, 'a>( + lbrace: LeftCurlyBrace<'input, 'a>, + elements: Vec>, + rbrace: RightCurlyBrace<'input, 'a>, +) -> Dict<'input, 'a> { + Dict { + elements, + lbrace, + rbrace, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_double_starred_keypairs<'input, 'a>( + first: DictElement<'input, 'a>, + rest: Vec<(Comma<'input, 'a>, DictElement<'input, 'a>)>, + trailing_comma: Option>, +) -> Vec> { + let mut elements = vec![]; + let mut current = first; + for (comma, next) in rest { + elements.push(current.with_comma(comma)); + current = next; + } + if let Some(comma) = trailing_comma { + current = current.with_comma(comma); + } + elements.push(current); + elements +} + +fn make_dict_element<'input, 'a>( + el: ( + Expression<'input, 'a>, + TokenRef<'input, 'a>, + Expression<'input, 'a>, + ), +) -> DictElement<'input, 'a> { + let (key, colon_tok, value) = el; + DictElement::Simple { + key, + value, + comma: Default::default(), + colon_tok, + } +} + +fn make_double_starred_element<'input, 'a>( + star_tok: TokenRef<'input, 'a>, + value: Expression<'input, 'a>, +) -> StarredDictElement<'input, 
'a> { + StarredDictElement { + value, + comma: Default::default(), + star_tok, + } +} + +fn make_index<'input, 'a>(value: Expression<'input, 'a>) -> BaseSlice<'input, 'a> { + BaseSlice::Index(Box::new(Index { + value, + star: None, + star_tok: None, + })) +} + +fn make_index_from_arg<'input, 'a>(arg: Arg<'input, 'a>) -> BaseSlice<'input, 'a> { + BaseSlice::Index(Box::new(Index { + value: arg.value, + star: Some(arg.star), + star_tok: arg.star_tok, + })) +} + +fn make_colon<'input, 'a>(tok: TokenRef<'input, 'a>) -> Colon<'input, 'a> { + Colon { tok } +} + +fn make_slice<'input, 'a>( + lower: Option>, + first_colon: TokenRef<'input, 'a>, + upper: Option>, + rest: Option<(TokenRef<'input, 'a>, Option>)>, +) -> BaseSlice<'input, 'a> { + let first_colon = make_colon(first_colon); + let (second_colon, step) = if let Some((tok, step)) = rest { + (Some(make_colon(tok)), step) + } else { + (None, None) + }; + BaseSlice::Slice(Box::new(Slice { + lower, + upper, + step, + first_colon, + second_colon, + })) +} + +fn make_slices<'input, 'a>( + first: BaseSlice<'input, 'a>, + rest: Vec<(Comma<'input, 'a>, BaseSlice<'input, 'a>)>, + trailing_comma: Option>, +) -> Vec> { + let mut elements = vec![]; + let mut current = first; + for (comma, next) in rest { + elements.push(SubscriptElement { + slice: current, + comma: Some(comma), + }); + current = next; + } + elements.push(SubscriptElement { + slice: current, + comma: trailing_comma, + }); + elements +} + +fn make_subscript<'input, 'a>( + value: Expression<'input, 'a>, + lbracket: LeftSquareBracket<'input, 'a>, + slice: Vec>, + rbracket: RightSquareBracket<'input, 'a>, +) -> Subscript<'input, 'a> { + Subscript { + value: Box::new(value), + slice, + lbracket, + rbracket, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_ifexp<'input, 'a>( + body: Expression<'input, 'a>, + if_tok: TokenRef<'input, 'a>, + test: Expression<'input, 'a>, + else_tok: TokenRef<'input, 'a>, + orelse: Expression<'input, 'a>, +) -> 
IfExp<'input, 'a> { + IfExp { + test: Box::new(test), + body: Box::new(body), + orelse: Box::new(orelse), + lpar: Default::default(), + rpar: Default::default(), + if_tok, + else_tok, + } +} + +fn add_arguments_trailing_comma<'input, 'a>( + mut args: Vec>, + trailing_comma: Option>, +) -> Vec> { + if let Some(comma) = trailing_comma { + let last = args.pop().unwrap(); + args.push(last.with_comma(comma)); + } + args +} + +fn make_lambda<'input, 'a>( + lambda_tok: TokenRef<'input, 'a>, + params: Parameters<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + expr: Expression<'input, 'a>, +) -> Lambda<'input, 'a> { + let colon = make_colon(colon_tok); + Lambda { + params: Box::new(params), + body: Box::new(expr), + colon, + lpar: Default::default(), + rpar: Default::default(), + lambda_tok, + } +} + +fn make_annotation<'input, 'a>( + tok: TokenRef<'input, 'a>, + ann: Expression<'input, 'a>, +) -> Annotation<'input, 'a> { + Annotation { + annotation: ann, + tok, + } +} + +fn make_ann_assignment<'input, 'a>( + target: AssignTargetExpression<'input, 'a>, + col: TokenRef<'input, 'a>, + ann: Expression<'input, 'a>, + rhs: Option<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, +) -> AnnAssign<'input, 'a> { + let annotation = make_annotation(col, ann); + let (eq, value) = rhs.map(|(x, y)| (Some(x), Some(y))).unwrap_or((None, None)); + let equal = eq.map(make_assign_equal); + AnnAssign { + target, + annotation, + value, + equal, + semicolon: None, + } +} + +fn make_yield<'input, 'a>( + yield_tok: TokenRef<'input, 'a>, + f: Option>, + e: Option>, +) -> Yield<'input, 'a> { + let value = match (f, e) { + (None, None) => None, + (Some(f), Some(e)) => Some(YieldValue::From(Box::new(make_from(f, e)))), + (None, Some(e)) => Some(YieldValue::Expression(Box::new(e))), + _ => panic!("yield from without expression"), + }; + Yield { + value: value.map(Box::new), + lpar: Default::default(), + rpar: Default::default(), + yield_tok, + } +} + +fn make_from<'input, 'a>(tok: TokenRef<'input, 
'a>, e: Expression<'input, 'a>) -> From<'input, 'a> { + From { item: e, tok } +} + +fn make_return<'input, 'a>( + return_tok: TokenRef<'input, 'a>, + value: Option>, +) -> Return<'input, 'a> { + Return { + value, + semicolon: Default::default(), + return_tok, + } +} + +fn make_assert<'input, 'a>( + assert_tok: TokenRef<'input, 'a>, + test: Expression<'input, 'a>, + rest: Option<(Comma<'input, 'a>, Expression<'input, 'a>)>, +) -> Assert<'input, 'a> { + let (comma, msg) = if let Some((c, msg)) = rest { + (Some(c), Some(msg)) + } else { + (None, None) + }; + + Assert { + test, + msg, + comma, + semicolon: Default::default(), + assert_tok, + } +} + +fn make_raise<'input, 'a>( + raise_tok: TokenRef<'input, 'a>, + exc: Option>, + rest: Option<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, +) -> Raise<'input, 'a> { + let cause = rest.map(|(t, e)| make_from(t, e)); + + Raise { + exc, + cause, + semicolon: Default::default(), + raise_tok, + } +} + +fn make_global<'input, 'a>( + tok: TokenRef<'input, 'a>, + init: Vec<(Name<'input, 'a>, Comma<'input, 'a>)>, + last: Name<'input, 'a>, +) -> Global<'input, 'a> { + let mut names: Vec> = init + .into_iter() + .map(|(name, c)| NameItem { + name, + comma: Some(c), + }) + .collect(); + names.push(NameItem { + name: last, + comma: None, + }); + Global { + names, + semicolon: Default::default(), + tok, + } +} + +fn make_nonlocal<'input, 'a>( + tok: TokenRef<'input, 'a>, + init: Vec<(Name<'input, 'a>, Comma<'input, 'a>)>, + last: Name<'input, 'a>, +) -> Nonlocal<'input, 'a> { + let mut names: Vec> = init + .into_iter() + .map(|(name, c)| NameItem { + name, + comma: Some(c), + }) + .collect(); + names.push(NameItem { + name: last, + comma: None, + }); + Nonlocal { + names, + semicolon: Default::default(), + tok, + } +} + +#[allow(clippy::too_many_arguments)] +fn make_for<'input, 'a>( + async_tok: Option>, + for_tok: TokenRef<'input, 'a>, + target: AssignTargetExpression<'input, 'a>, + in_tok: TokenRef<'input, 'a>, + iter: 
Expression<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, + orelse: Option>, +) -> For<'input, 'a> { + let asynchronous = async_tok.as_ref().map(|_| make_async()); + + For { + target, + iter, + body, + orelse, + asynchronous, + async_tok, + for_tok, + in_tok, + colon_tok, + } +} + +fn make_while<'input, 'a>( + while_tok: TokenRef<'input, 'a>, + test: Expression<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, + orelse: Option>, +) -> While<'input, 'a> { + While { + test, + body, + orelse, + while_tok, + colon_tok, + } +} + +fn make_await<'input, 'a>( + await_tok: TokenRef<'input, 'a>, + expression: Expression<'input, 'a>, +) -> Await<'input, 'a> { + Await { + expression: Box::new(expression), + lpar: Default::default(), + rpar: Default::default(), + await_tok, + } +} + +fn make_class_def<'input, 'a>( + class_tok: TokenRef<'input, 'a>, + name: Name<'input, 'a>, + type_parameters: Option>, + args: Option<( + LeftParen<'input, 'a>, + Option>>, + RightParen<'input, 'a>, + )>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> std::result::Result, &'static str> { + let mut bases = vec![]; + let mut keywords = vec![]; + let mut lpar_tok = None; + let mut rpar_tok = None; + let mut lpar = None; + let mut rpar = None; + + if let Some((lpar_, args, rpar_)) = args { + lpar_tok = Some(lpar_.lpar_tok); + rpar_tok = Some(rpar_.rpar_tok); + lpar = Some(lpar_); + rpar = Some(rpar_); + if let Some(args) = args { + let mut current_arg = &mut bases; + let mut seen_keyword = false; + for arg in args { + if arg.star == "**" || arg.keyword.is_some() { + current_arg = &mut keywords; + seen_keyword = true; + } + if seen_keyword + && (arg.star == "*" || (arg.star.is_empty() && arg.keyword.is_none())) + { + return Err("Positional argument follows keyword argument"); + } + // TODO: libcst-python does validation here + current_arg.push(arg); + } + } + } + Ok(ClassDef { + name, + type_parameters, + body, + bases, + 
keywords, + decorators: vec![], + lpar, + rpar, + class_tok, + lpar_tok, + rpar_tok, + colon_tok, + }) +} + +fn make_string<'input, 'a>(tok: TokenRef<'input, 'a>) -> String<'input, 'a> { + String::Simple(SimpleString { + value: tok.string, + ..Default::default() + }) +} + +fn make_strings<'input, 'a>( + s: Vec<(String<'input, 'a>, TokenRef<'input, 'a>)>, +) -> GrammarResult> { + if s.len() > MAX_RECURSION_DEPTH { + return Err("shorter concatenated string"); + } + let mut strings = s.into_iter().rev(); + let (first, _) = strings.next().expect("no strings to make a string of"); + Ok(strings.fold(first, |acc, (str, tok)| { + let ret: String<'input, 'a> = String::Concatenated(ConcatenatedString { + left: Box::new(str), + right: Box::new(acc), + lpar: Default::default(), + rpar: Default::default(), + right_tok: tok, + }); + ret + })) +} + +fn make_tstring_expression<'input, 'a>( + lbrace_tok: TokenRef<'input, 'a>, + expression: Expression<'input, 'a>, + eq: Option>, + conversion_pair: Option<(TokenRef<'input, 'a>, &'a str)>, + format_pair: Option<( + TokenRef<'input, 'a>, + Vec>, + )>, + rbrace_tok: TokenRef<'input, 'a>, +) -> TemplatedStringExpression<'input, 'a> { + let equal: Option> = eq.map(make_assign_equal); + let (conversion_tok, conversion) = if let Some((t, c)) = conversion_pair { + (Some(t), Some(c)) + } else { + (None, None) + }; + let (format_tok, format_spec) = if let Some((t, f)) = format_pair { + (Some(t), Some(f)) + } else { + (None, None) + }; + let after_expr_tok = if equal.is_some() { + None + } else if let Some(tok) = conversion_tok { + Some(tok) + } else if let Some(tok) = format_tok { + Some(tok) + } else { + Some(rbrace_tok) + }; + + TemplatedStringExpression { + expression, + conversion, + format_spec, + equal, + lbrace_tok, + after_expr_tok, + } +} + +fn make_fstring_expression<'input, 'a>( + lbrace_tok: TokenRef<'input, 'a>, + expression: Expression<'input, 'a>, + eq: Option>, + conversion_pair: Option<(TokenRef<'input, 'a>, &'a str)>, + 
format_pair: Option<( + TokenRef<'input, 'a>, + Vec>, + )>, + rbrace_tok: TokenRef<'input, 'a>, +) -> FormattedStringExpression<'input, 'a> { + let equal = eq.map(make_assign_equal); + let (conversion_tok, conversion) = if let Some((t, c)) = conversion_pair { + (Some(t), Some(c)) + } else { + (None, None) + }; + let (format_tok, format_spec) = if let Some((t, f)) = format_pair { + (Some(t), Some(f)) + } else { + (None, None) + }; + let after_expr_tok = if equal.is_some() { + None + } else if let Some(tok) = conversion_tok { + Some(tok) + } else if let Some(tok) = format_tok { + Some(tok) + } else { + Some(rbrace_tok) + }; + + FormattedStringExpression { + expression, + conversion, + format_spec, + equal, + lbrace_tok, + after_expr_tok, + } +} + +fn make_fstring<'input, 'a>( + start: &'a str, + parts: Vec>, + end: &'a str, +) -> FormattedString<'input, 'a> { + FormattedString { + start, + parts, + end, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_tstring<'input, 'a>( + start: &'a str, + parts: Vec>, + end: &'a str, +) -> TemplatedString<'input, 'a> { + TemplatedString { + start, + parts, + end, + lpar: Default::default(), + rpar: Default::default(), + } +} + +fn make_finally<'input, 'a>( + finally_tok: TokenRef<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> Finally<'input, 'a> { + Finally { + body, + finally_tok, + colon_tok, + } +} + +fn make_except<'input, 'a>( + except_tok: TokenRef<'input, 'a>, + exp: Option>, + as_: Option<(TokenRef<'input, 'a>, Name<'input, 'a>)>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> ExceptHandler<'input, 'a> { + // TODO: AsName should come from outside + let name = as_.map(|(x, y)| make_as_name(x, AssignTargetExpression::Name(Box::new(y)))); + ExceptHandler { + body, + r#type: exp, + name, + except_tok, + colon_tok, + } +} + +fn make_except_star<'input, 'a>( + except_tok: TokenRef<'input, 'a>, + star_tok: TokenRef<'input, 'a>, + exp: 
Expression<'input, 'a>, + as_: Option<(TokenRef<'input, 'a>, Name<'input, 'a>)>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> ExceptStarHandler<'input, 'a> { + // TODO: AsName should come from outside + let name = as_.map(|(x, y)| make_as_name(x, AssignTargetExpression::Name(Box::new(y)))); + ExceptStarHandler { + body, + r#type: exp, + name, + except_tok, + colon_tok, + star_tok, + } +} + +fn make_try<'input, 'a>( + try_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, + handlers: Vec>, + orelse: Option>, + finalbody: Option>, +) -> Try<'input, 'a> { + Try { + body, + handlers, + orelse, + finalbody, + try_tok, + } +} + +fn make_try_star<'input, 'a>( + try_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, + handlers: Vec>, + orelse: Option>, + finalbody: Option>, +) -> TryStar<'input, 'a> { + TryStar { + body, + handlers, + orelse, + finalbody, + try_tok, + } +} + +fn make_aug_op<'input, 'a>(tok: TokenRef<'input, 'a>) -> Result<'a, AugOp<'input, 'a>> { + Ok(match tok.string { + "+=" => AugOp::AddAssign { tok }, + "-=" => AugOp::SubtractAssign { tok }, + "*=" => AugOp::MultiplyAssign { tok }, + "@=" => AugOp::MatrixMultiplyAssign { tok }, + "/=" => AugOp::DivideAssign { tok }, + "%=" => AugOp::ModuloAssign { tok }, + "&=" => AugOp::BitAndAssign { tok }, + "|=" => AugOp::BitOrAssign { tok }, + "^=" => AugOp::BitXorAssign { tok }, + "<<=" => AugOp::LeftShiftAssign { tok }, + ">>=" => AugOp::RightShiftAssign { tok }, + "**=" => AugOp::PowerAssign { tok }, + "//=" => AugOp::FloorDivideAssign { tok }, + _ => return Err(ParserError::OperatorError), + }) +} + +fn make_aug_assign<'input, 'a>( + target: AssignTargetExpression<'input, 'a>, + operator: AugOp<'input, 'a>, + value: Expression<'input, 'a>, +) -> AugAssign<'input, 'a> { + AugAssign { + target, + operator, + value, + semicolon: Default::default(), + } +} + +fn make_with_item<'input, 'a>( + item: Expression<'input, 'a>, + as_: Option>, + n: Option>, +) -> WithItem<'input, 'a> { + let 
asname = match (as_, n) { + (Some(as_), Some(n)) => Some(make_as_name(as_, n)), + (None, None) => None, + _ => panic!("as and name should be present or missing together"), + }; + WithItem { + item, + asname, + comma: Default::default(), + } +} + +fn make_with<'input, 'a>( + async_tok: Option>, + with_tok: TokenRef<'input, 'a>, + lpar: Option>, + items: Vec>, + rpar: Option>, + colon_tok: TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> With<'input, 'a> { + let asynchronous = async_tok.as_ref().map(|_| make_async()); + With { + items, + body, + asynchronous, + lpar, + rpar, + async_tok, + with_tok, + colon_tok, + } +} + +fn make_del<'input, 'a>( + tok: TokenRef<'input, 'a>, + target: DelTargetExpression<'input, 'a>, +) -> Del<'input, 'a> { + Del { + target, + semicolon: Default::default(), + tok, + } +} + +fn make_del_tuple<'input, 'a>( + lpar: Option>, + elements: Vec>, + rpar: Option>, +) -> DelTargetExpression<'input, 'a> { + DelTargetExpression::Tuple(Box::new(Tuple { + elements, + lpar: lpar.map(|x| vec![x]).unwrap_or_default(), + rpar: rpar.map(|x| vec![x]).unwrap_or_default(), + })) +} + +fn make_named_expr<'input, 'a>( + name: Name<'input, 'a>, + tok: TokenRef<'input, 'a>, + expr: Expression<'input, 'a>, +) -> NamedExpr<'input, 'a> { + NamedExpr { + target: Box::new(Expression::Name(Box::new(name))), + value: Box::new(expr), + lpar: Default::default(), + rpar: Default::default(), + walrus_tok: tok, + } +} + +fn make_match<'input, 'a>( + match_tok: TokenRef<'input, 'a>, + subject: Expression<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + indent_tok: TokenRef<'input, 'a>, + cases: Vec>, + dedent_tok: TokenRef<'input, 'a>, +) -> Match<'input, 'a> { + Match { + subject, + cases, + indent: Default::default(), + match_tok, + colon_tok, + indent_tok, + dedent_tok, + } +} + +fn make_case<'input, 'a>( + case_tok: TokenRef<'input, 'a>, + pattern: MatchPattern<'input, 'a>, + guard: Option<(TokenRef<'input, 'a>, Expression<'input, 'a>)>, + colon_tok: 
TokenRef<'input, 'a>, + body: Suite<'input, 'a>, +) -> MatchCase<'input, 'a> { + let (if_tok, guard) = match guard { + Some((if_tok, guard)) => (Some(if_tok), Some(guard)), + None => (None, None), + }; + MatchCase { + pattern, + guard, + body, + case_tok, + if_tok, + colon_tok, + } +} + +fn make_match_value<'input, 'a>(value: Expression<'input, 'a>) -> MatchPattern<'input, 'a> { + MatchPattern::Value(MatchValue { value }) +} + +fn make_match_singleton<'input, 'a>(value: Name<'input, 'a>) -> MatchPattern<'input, 'a> { + MatchPattern::Singleton(MatchSingleton { value }) +} + +fn make_list_pattern<'input, 'a>( + lbracket: Option>, + patterns: Vec>, + rbracket: Option>, +) -> MatchSequence<'input, 'a> { + MatchSequence::MatchList(MatchList { + patterns, + lbracket, + rbracket, + lpar: Default::default(), + rpar: Default::default(), + }) +} + +fn make_as_pattern<'input, 'a>( + pattern: Option>, + as_tok: Option>, + name: Option>, +) -> MatchPattern<'input, 'a> { + MatchPattern::As(Box::new(MatchAs { + pattern, + name, + lpar: Default::default(), + rpar: Default::default(), + as_tok, + })) +} + +fn make_bit_or<'input, 'a>(tok: TokenRef<'input, 'a>) -> BitOr<'input, 'a> { + BitOr { tok } +} + +fn make_or_pattern<'input, 'a>( + first: MatchPattern<'input, 'a>, + rest: Vec<(TokenRef<'input, 'a>, MatchPattern<'input, 'a>)>, +) -> MatchPattern<'input, 'a> { + if rest.is_empty() { + return first; + } + + let mut patterns = vec![]; + let mut current = first; + for (sep, next) in rest { + let op = make_bit_or(sep); + patterns.push(MatchOrElement { + pattern: current, + separator: Some(op), + }); + current = next; + } + patterns.push(MatchOrElement { + pattern: current, + separator: None, + }); + MatchPattern::Or(Box::new(MatchOr { + patterns, + lpar: Default::default(), + rpar: Default::default(), + })) +} + +fn ensure_real_number<'input, 'a>( + tok: TokenRef<'input, 'a>, +) -> GrammarResult> { + match make_number(tok) { + e @ (Expression::Integer(_) | Expression::Float(_)) => 
Ok(e), + _ => Err("real number"), + } +} + +fn ensure_imaginary_number<'input, 'a>( + tok: TokenRef<'input, 'a>, +) -> GrammarResult> { + match make_number(tok) { + e @ Expression::Imaginary(_) => Ok(e), + _ => Err("imaginary number"), + } +} + +fn make_tuple_pattern<'input, 'a>( + lpar: LeftParen<'input, 'a>, + patterns: Vec>, + rpar: RightParen<'input, 'a>, +) -> MatchSequence<'input, 'a> { + MatchSequence::MatchTuple(MatchTuple { + patterns, + lpar: vec![lpar], + rpar: vec![rpar], + }) +} + +fn make_open_sequence_pattern<'input, 'a>( + first: StarrableMatchSequenceElement<'input, 'a>, + comma: Comma<'input, 'a>, + mut rest: Vec>, +) -> Vec> { + rest.insert(0, first.with_comma(comma)); + rest +} + +fn make_match_sequence_element<'input, 'a>( + value: MatchPattern<'input, 'a>, +) -> MatchSequenceElement<'input, 'a> { + MatchSequenceElement { + value, + comma: Default::default(), + } +} + +fn make_match_star<'input, 'a>( + star_tok: TokenRef<'input, 'a>, + name: Option>, +) -> MatchStar<'input, 'a> { + MatchStar { + name, + comma: Default::default(), + star_tok, + } +} + +fn make_match_mapping<'input, 'a>( + lbrace: LeftCurlyBrace<'input, 'a>, + mut elements: Vec>, + el_comma: Option>, + star_tok: Option>, + rest: Option>, + trailing_comma: Option>, + rbrace: RightCurlyBrace<'input, 'a>, +) -> MatchPattern<'input, 'a> { + if let Some(c) = el_comma { + if let Some(el) = elements.pop() { + elements.push(el.with_comma(c)); + } + // TODO: else raise error + } + MatchPattern::Mapping(MatchMapping { + elements, + rest, + trailing_comma, + lbrace, + rbrace, + lpar: Default::default(), + rpar: Default::default(), + star_tok, + }) +} + +fn make_match_mapping_element<'input, 'a>( + key: Expression<'input, 'a>, + colon_tok: TokenRef<'input, 'a>, + pattern: MatchPattern<'input, 'a>, +) -> MatchMappingElement<'input, 'a> { + MatchMappingElement { + key, + pattern, + comma: Default::default(), + colon_tok, + } +} + +fn make_class_pattern<'input, 'a>( + cls: 
NameOrAttribute<'input, 'a>, + lpar_tok: TokenRef<'input, 'a>, + mut patterns: Vec>, + pat_comma: Option>, + mut kwds: Vec>, + kwd_comma: Option>, + rpar_tok: TokenRef<'input, 'a>, +) -> MatchPattern<'input, 'a> { + if let Some(c) = pat_comma { + if let Some(el) = patterns.pop() { + patterns.push(el.with_comma(c)); + } + // TODO: else raise error + } + if let Some(c) = kwd_comma { + if let Some(el) = kwds.pop() { + kwds.push(el.with_comma(c)); + } + // TODO: else raise error + } + MatchPattern::Class(MatchClass { + cls, + patterns, + kwds, + lpar: Default::default(), + rpar: Default::default(), + lpar_tok, + rpar_tok, + }) +} + +fn make_match_keyword_element<'input, 'a>( + key: Name<'input, 'a>, + equal_tok: TokenRef<'input, 'a>, + pattern: MatchPattern<'input, 'a>, +) -> MatchKeywordElement<'input, 'a> { + MatchKeywordElement { + key, + pattern, + comma: Default::default(), + equal_tok, + } +} + +struct TypeParamBound<'input, 'a>(TokenRef<'input, 'a>, Expression<'input, 'a>); + +fn make_type_param_bound<'input, 'a>( + colon_tok: TokenRef<'input, 'a>, + e: Expression<'input, 'a>, +) -> TypeParamBound<'input, 'a> { + TypeParamBound(colon_tok, e) +} + +fn make_param_spec<'input, 'a>( + star_tok: TokenRef<'input, 'a>, + name: Name<'input, 'a>, + def: Option<(AssignEqual<'input, 'a>, Expression<'input, 'a>)>, +) -> TypeParam<'input, 'a> { + let (equal, default) = match def { + Some((a, b)) => (Some(a), Some(b)), + None => (None, None), + }; + TypeParam { + param: TypeVarLike::ParamSpec(ParamSpec { name, star_tok }), + comma: Default::default(), + equal: equal, + star: "", + default: default, + star_tok: None, + } +} + +fn make_type_var_tuple<'input, 'a>( + star_tok: TokenRef<'input, 'a>, + name: Name<'input, 'a>, + def: Option<( + AssignEqual<'input, 'a>, + Option>, + Expression<'input, 'a>, + )>, +) -> TypeParam<'input, 'a> { + let (equal, default_star, default) = match def { + Some((a, b, c)) => (Some(a), b, Some(c)), + None => (None, None, None), + }; + let star = 
match default_star { + Some(a) => a.string, + None => "", + }; + + TypeParam { + param: TypeVarLike::TypeVarTuple(TypeVarTuple { name, star_tok }), + comma: Default::default(), + equal: equal, + star: star, + default: default, + star_tok: default_star, + } +} + +fn make_type_var<'input, 'a>( + name: Name<'input, 'a>, + bound: Option>, + def: Option<(AssignEqual<'input, 'a>, Expression<'input, 'a>)>, +) -> TypeParam<'input, 'a> { + let (bound, colon) = match bound { + Some(TypeParamBound(c, e)) => (Some(Box::new(e)), Some(make_colon(c))), + _ => (None, None), + }; + let (equal, default) = match def { + Some((a, b)) => (Some(a), Some(b)), + None => (None, None), + }; + TypeParam { + param: TypeVarLike::TypeVar(TypeVar { name, bound, colon }), + comma: Default::default(), + equal: equal, + star: "", + default: default, + star_tok: None, + } +} + +fn make_type_parameters<'input, 'a>( + lbracket: LeftSquareBracket<'input, 'a>, + params: Vec>, + rbracket: RightSquareBracket<'input, 'a>, +) -> TypeParameters<'input, 'a> { + TypeParameters { + lbracket, + params, + rbracket, + } +} + +fn make_type_alias<'input, 'a>( + type_tok: TokenRef<'input, 'a>, + name: Name<'input, 'a>, + type_parameters: Option>, + equals_tok: TokenRef<'input, 'a>, + value: Expression<'input, 'a>, +) -> TypeAlias<'input, 'a> { + let lbracket_tok = if let Some(tp) = &type_parameters { + Some(tp.lbracket.tok) + } else { + None + }; + TypeAlias { + type_tok, + name, + type_parameters, + equals_tok, + value: Box::new(value), + semicolon: Default::default(), + lbracket_tok, + } +} diff --git a/native/libcst/src/parser/mod.rs b/native/libcst/src/parser/mod.rs new file mode 100644 index 00000000..4e9b4654 --- /dev/null +++ b/native/libcst/src/parser/mod.rs @@ -0,0 +1,12 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +mod errors; +mod grammar; +mod numbers; + +pub use errors::ParserError; +pub(crate) use grammar::TokVec; +pub use grammar::{python, Result}; diff --git a/native/libcst/src/parser/numbers.rs b/native/libcst/src/parser/numbers.rs new file mode 100644 index 00000000..95db532b --- /dev/null +++ b/native/libcst/src/parser/numbers.rs @@ -0,0 +1,69 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use regex::Regex; + +use crate::nodes::deflated::{Expression, Float, Imaginary, Integer}; + +static HEX: &str = r"0[xX](?:_?[0-9a-fA-F])+"; +static BIN: &str = r"0[bB](?:_?[01])+"; +static OCT: &str = r"0[oO](?:_?[0-7])+"; +static DECIMAL: &str = r"(?:0(?:_?0)*|[1-9](?:_?[0-9])*)"; + +static EXPONENT: &str = r"[eE][-+]?[0-9](?:_?[0-9])*"; +// Note: these don't exactly match the python implementation (exponent is not included) +static POINT_FLOAT: &str = r"([0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?|\.[0-9](?:_?[0-9])*)"; +static EXP_FLOAT: &str = r"[0-9](?:_?[0-9])*"; + +thread_local! 
{ + static INTEGER_RE: Regex = + Regex::new(format!("^({}|{}|{}|{})$", HEX, BIN, OCT, DECIMAL).as_str()).expect("regex"); + static FLOAT_RE: Regex = + Regex::new( + format!( + "^({}({})?|{}{})$", + POINT_FLOAT, EXPONENT, EXP_FLOAT, EXPONENT + ) + .as_str(), + ) + .expect("regex"); + static IMAGINARY_RE: Regex = + Regex::new( + format!( + r"^([0-9](?:_?[0-9])*[jJ]|({}({})?|{}{})[jJ])$", + POINT_FLOAT, EXPONENT, EXP_FLOAT, EXPONENT + ) + .as_str(), + ) + .expect("regex"); +} + +pub(crate) fn parse_number(raw: &str) -> Expression { + if INTEGER_RE.with(|r| r.is_match(raw)) { + Expression::Integer(Box::new(Integer { + value: raw, + lpar: Default::default(), + rpar: Default::default(), + })) + } else if FLOAT_RE.with(|r| r.is_match(raw)) { + Expression::Float(Box::new(Float { + value: raw, + lpar: Default::default(), + rpar: Default::default(), + })) + } else if IMAGINARY_RE.with(|r| r.is_match(raw)) { + Expression::Imaginary(Box::new(Imaginary { + value: raw, + lpar: Default::default(), + rpar: Default::default(), + })) + } else { + Expression::Integer(Box::new(Integer { + value: raw, + lpar: Default::default(), + rpar: Default::default(), + })) + } +} diff --git a/native/libcst/src/py.rs b/native/libcst/src/py.rs new file mode 100644 index 00000000..68c03744 --- /dev/null +++ b/native/libcst/src/py.rs @@ -0,0 +1,32 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use crate::nodes::traits::py::TryIntoPy; +use pyo3::prelude::*; + +#[pymodule(gil_used = false)] +#[pyo3(name = "native")] +pub fn libcst_native(_py: Python, m: &Bound) -> PyResult<()> { + #[pyfn(m)] + #[pyo3(signature = (source, encoding=None))] + fn parse_module(source: String, encoding: Option<&str>) -> PyResult> { + let m = crate::parse_module(source.as_str(), encoding)?; + Python::attach(|py| m.try_into_py(py)) + } + + #[pyfn(m)] + fn parse_expression(source: String) -> PyResult> { + let expr = crate::parse_expression(source.as_str())?; + Python::attach(|py| expr.try_into_py(py)) + } + + #[pyfn(m)] + fn parse_statement(source: String) -> PyResult> { + let stm = crate::parse_statement(source.as_str())?; + Python::attach(|py| stm.try_into_py(py)) + } + + Ok(()) +} diff --git a/native/libcst/src/tokenizer/core/LICENSE b/native/libcst/src/tokenizer/core/LICENSE new file mode 100644 index 00000000..7e9199f0 --- /dev/null +++ b/native/libcst/src/tokenizer/core/LICENSE @@ -0,0 +1,46 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. 
This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/native/libcst/src/tokenizer/core/README.md b/native/libcst/src/tokenizer/core/README.md new file mode 100644 index 00000000..dfef60f4 --- /dev/null +++ b/native/libcst/src/tokenizer/core/README.md @@ -0,0 +1,2 @@ +Files in this directory are a derivative of CPython's tokenizer, and are +therefore available under the PSF license. diff --git a/native/libcst/src/tokenizer/core/mod.rs b/native/libcst/src/tokenizer/core/mod.rs new file mode 100644 index 00000000..120b6163 --- /dev/null +++ b/native/libcst/src/tokenizer/core/mod.rs @@ -0,0 +1,1214 @@ +// This implementation is Copyright (c) Meta Platforms, Inc. and affiliates. +// +// CPython 3.10.0a5 and the original C code this is based on is +// Copyright (c) 2001-2021 Python Software Foundation; All Rights Reserved +// +// Portions of this module (f-string splitting) are based on parso's tokenize.py, which is also PSF +// licensed. + +/// A port of CPython's tokenizer.c to Rust, with the following significant modifications: +/// +/// - PEP 263 (encoding detection) support isn't implemented. We depend on other code to do this for +/// us right now, and expect that the input is utf-8 by the time we see it. +/// +/// - Removed support for tokenizing from a file handle without reading the whole file in at once. +/// This significantly complicates parsing and memory is cheap, so we require that the whole file +/// is read in and converted to a unicode string before tokenization can begin. +/// +/// - Removed support for the interactive interpreter parsing mode. +/// +/// - Tweaked the `translate_newlines` functionality and moved most of it into TextPosition. 
`\r` +/// characters are no longer removed from the input buffer, so strings may contain `\r` characters +/// that should be normalized prior to being interpreted. +/// +/// - Added support for tracking more detailed position information via TextPosition. As a +/// consequence, consuming and then backing up a character (`tok_nextc`/`tok_backup`) is more +/// expensive, and we prefer to call `TextPosition::peek()` instead. +/// +/// - Removed support for tokenizing type comments. +/// +/// - Reduced the number of different supported token types to match what parso's tokenizer yields. +/// +/// - Uses some regular expressions. Regular expression are a good fit for a tokenizer, but we don't +/// use regular expressions everywhere because we can't generate as good of error messages with +/// them. +/// +/// - Added support for breaking apart f-strings into multiple tokens, matching Parso's tokenizer +/// behavior. CPython instead runs the parser recursively to parse f-strings. +/// +/// Also, in general, the code is less tightly optimized. The CPython implementation is crazy +/// optimized in ways that wouldn't translate well to rust (e.g. it parses the input utf-8 buffer as +/// raw bytes instead of unicode codepoints). +/// +/// The implementation should still be faster than any pure-Python implementation, and most +/// optimizations (avoiding string copies when slicing) carry over to Rust very well. +/// +/// Planned (not yet implemented) features: +/// +/// - Add more feature flags to more closely match the behavior of older versions of Python 3.x. +/// +/// - Support for a Python 2 mode that tokenizes Python 2.7 code and fails on certain new Python 3 +/// syntax that wasn't supported in 2.7. +/// +/// - Maybe add back support for tokenizing type comments? +/// +/// This implementation is tailored to LibCST's needs. If you're looking for a more general-purpose +/// pure-Rust Python parser, consider using [RustPython's parser][]. 
+/// +/// [RustPython's parser]: https://crates.io/crates/rustpython-parser +mod string_types; + +use regex::Regex; +use std::cell::RefCell; +use std::cmp::Ordering; +use std::convert::TryInto; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::rc::Rc; + +use crate::tokenizer::core::string_types::FTStringType; +use crate::tokenizer::{ + core::string_types::{FTStringNode, StringQuoteChar, StringQuoteSize}, + operators::OPERATOR_RE, + text_position::{TextPosition, TextPositionSnapshot}, + whitespace_parser::State as WhitespaceState, +}; + +/// The maximum number of indentation levels at any given point in time. CPython's tokenizer.c caps +/// this to avoid the complexity of allocating a dynamic array, but we're using a Vec, so it's not +/// necessary, but we're keeping it to maintain compatibility. +const MAX_INDENT: usize = 100; + +// MAX_CHAR should be std::char::MAX once assoc_char_consts is stablized. +// https://github.com/rust-lang/rust/issues/71763 +const MAX_CHAR: char = '\u{10ffff}'; + +thread_local! { + static SPACE_TAB_FORMFEED_RE: Regex = Regex::new(r"\A[ \f\t]+").expect("regex"); + static ANY_NON_NEWLINE_RE: Regex = Regex::new(r"\A[^\r\n]+").expect("regex"); + static STRING_PREFIX_RE: Regex = + Regex::new(r"\A(?i)(u|[bf]r|r[bft]|r|b|f|t)").expect("regex"); + static POTENTIAL_IDENTIFIER_TAIL_RE: Regex = + Regex::new(r"\A([a-zA-Z0-9_]|[^\x00-\x7f])+").expect("regex"); + static DECIMAL_DOT_DIGIT_RE: Regex = Regex::new(r"\A\.[0-9]").expect("regex"); + static DECIMAL_TAIL_RE: Regex = + Regex::new(r"\A[0-9](_?[0-9])*").expect("regex"); + static HEXADECIMAL_TAIL_RE: Regex = + Regex::new(r"\A(_?[0-9a-fA-F])+").expect("regex"); + static OCTAL_TAIL_RE: Regex = Regex::new(r"\A(_?[0-7])+").expect("regex"); + static BINARY_TAIL_RE: Regex = Regex::new(r"\A(_?[01])+").expect("regex"); + + /// Used to verify identifiers when there's a non-ascii character in them. + // This changes across unicode revisions. 
We'd need to ship our own unicode tables to 100% match a + // given Python version's behavior. + static UNICODE_IDENTIFIER_RE: Regex = + Regex::new(r"\A[\p{XID_Start}_]\p{XID_Continue}*\z").expect("regex"); +} + +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub enum TokType { + String, + Name, + Number, + Op, + Newline, + Indent, + Dedent, + Async, + Await, + FStringStart, + FStringString, + FStringEnd, + TStringStart, + TStringString, + TStringEnd, + EndMarker, +} + +#[derive(Debug, thiserror::Error, Eq, PartialEq)] +pub enum TokError<'t> { + #[error("inconsistent mixing of tabs and spaces")] + TabSpace, + #[error("too many indentation levels")] + TooDeep, + #[error("no matching outer block for dedent")] + Dedent, + #[error("unexpected characters after a line continuation")] + LineContinuation, + #[error("unexpected end of file after a line continuation")] + LineContinuationEof, + #[error("{0:?} is not a valid identifier")] + BadIdentifier(&'t str), + #[error("invalid decimal literal")] + BadDecimal, + #[error( + "{}{}", + "leading zeros in decimal integer literals are not permitted; use an 0o prefix for octal ", + "integers" + )] + BadDecimalLeadingZeros, + #[error("invalid hexadecimal literal")] + BadHexadecimal, + #[error("invalid octal literal")] + BadOctal, + #[error("invalid digit {0:?} in octal literal")] + BadOctalDigit(char), + #[error("invalid binary literal")] + BadBinary, + #[error("invalid digit {0:?} in binary literal")] + BadBinaryDigit(char), + #[error("unterminated string literal")] + UnterminatedString, + #[error("unterminated triple-quoted string literal")] + UnterminatedTripleQuotedString, + #[error("unmatched {0:?}")] + UnmatchedClosingParen(char), + #[error("Closing parenthesis {1:?} does not match opening parenthesis {0:?}")] + MismatchedClosingParen(char, char), + #[error("Closing parenthesis {1:?} does not match opening parenthesis {0:?} on line {2:}")] + MismatchedClosingParenOnLine(char, char, usize), + #[error("{0:?} is not a valid 
character in this position")] + BadCharacter(char), +} + +// Clone is used for async_hacks, which needs to speculatively look-ahead one token. +#[derive(Clone)] +pub struct TokState<'t> { + /// The full program's source code (similar to `tok->str` or `tok->buf` in the CPython source + /// code). We don't support reading the file line-by-line from a file handle like CPython does, + /// so this is the whole program pre-converted to utf-8. + pub text_pos: TextPosition<'t>, + /// Start of the most recently returned token. + pub start_pos: TextPositionSnapshot, + /// True after we've encountered an error or there's no more text to process. + done: bool, + /// How many spaces a tab counts as (always 8) + tab_size: usize, + /// How many spaces a tab counts as in alt_indent_stack (always 1) + alt_tab_size: usize, + /// Stack of indentation levels where a tab is counted as 8 characters, used for tracking + /// dedents. Length is current indentation level. Should never have more than MAX_INDENT + /// entries. + indent_stack: Vec, + /// Used to check that tabs and spaces are not mixed. + alt_indent_stack: Vec, + /// Beginning of line. True if at the beginning of a new line. + at_bol: bool, + /// The number of bytes at the beginning of the line, as measured by consume_bol_whitespace. + /// Used by libcst to capture (and then validate and parse) the indentation. + pub bol_width: usize, + /// Set by `consume_bol_whitespace`, true if the current line is blank. + blank_line: bool, + /// Pending intents (if > 0) or dedents (if < 0). Used when multiple tokens need to be produced + /// at once. + pending_indents: i32, + /// Length is `() [] {}` parenthesis nesting level. Used to allow free continuations inside + /// them. Stack entries are to verify that closing parenthesis match opening parenthesis. + /// Tuple is (character, lineno). + paren_stack: Vec<(char, usize)>, + /// Whether we're in a continuation line. + cont_line: bool, + + /// True if async/await aren't always keywords. 
+ async_hacks: bool, + /// True if tokens are inside an 'async def' body. + async_def: bool, + /// Indentation level of the outermost 'async def'. + async_def_indent: usize, + /// True if the outermost 'async def' had at least one NEWLINE token after it. + async_def_nl: bool, + + /// Splits f-strings into multiple tokens instead of a STRING token if true. + /// + /// CPython doesn't directly split f-strings in the tokenizer (and therefore doesn't support + /// this option). Instead, when the parser encounters an f-string, it recursively re-runs the + /// tokenizer and parser. + /// + /// Supporting this at the tokenizer-level is pretty nasty and adds a lot of complexity. + /// Eventually, we should probably support this at the parser-level instead. + split_ftstring: bool, + ftstring_stack: Vec, + + missing_nl_before_eof: bool, +} + +pub struct TokConfig { + /// Used in Python 3.5 and 3.6. If enabled, async/await are sometimes keywords and sometimes + /// identifiers, depending on if they're being used in the context of an async function. This + /// breaks async comprehensions outside of async functions. 
+ pub async_hacks: bool, + pub split_ftstring: bool, + // Not currently supported: + // type_comments: bool, +} + +fn is_digit>>(ch: C) -> bool { + matches!(ch.into(), Some('0'..='9')) +} + +#[derive(Debug)] +enum NumberState { + StartDigit, + Fraction, + Exponent, + Imaginary, +} + +impl<'t> TokState<'t> { + pub fn new(text: &'t str, config: &TokConfig) -> Self { + let text_pos = TextPosition::new(text); + let start_pos = (&text_pos).into(); + Self { + text_pos, + start_pos, + done: false, + tab_size: 8, + alt_tab_size: 1, + indent_stack: Vec::new(), + alt_indent_stack: Vec::new(), + at_bol: true, + bol_width: 0, + blank_line: false, + pending_indents: 0, + paren_stack: Vec::new(), + cont_line: false, + async_hacks: config.async_hacks, + async_def: false, + async_def_indent: 0, + async_def_nl: false, + split_ftstring: config.split_ftstring, + ftstring_stack: Vec::new(), + missing_nl_before_eof: text.is_empty() || text.as_bytes()[text.len() - 1] != b'\n', + } + } + + pub fn is_parenthesized(&self) -> bool { + !self.paren_stack.is_empty() + } + + /// Implementation of `next()`, wrapped by next() to allow for easier error handling. Roughly + /// equivalent to `tok_get` in the C source code. + fn next_inner(&mut self) -> Result> { + if self.split_ftstring { + if let Some(tos) = self.ftstring_stack.last() { + if !tos.is_in_expr() { + self.start_pos = (&self.text_pos).into(); + let is_in_format_spec = tos.is_in_format_spec(); + let is_raw_string = tos.is_raw_string; + if let Some(tok) = + self.maybe_consume_ftstring_string(is_in_format_spec, is_raw_string)? + { + return Ok(tok); + } + if let Some(tok) = self.maybe_consume_ftstring_end() { + return Ok(tok); + } + } + } + } + + // This will never consume a token, but it may set blank_line and it may set + // pending_indents. 
+ self.consume_bol_whitespace()?; + + // Return pending indents/dedents + if let Some(t) = self.process_pending_indents() { + self.start_pos = (&self.text_pos).into(); + return Ok(t); + } + + self.maybe_close_async_def(); + + 'again: loop { + // Skip spaces + SPACE_TAB_FORMFEED_RE.with(|v| self.text_pos.consume(v)); + + // Skip comment, unless it's a type comment + if self.text_pos.peek() == Some('#') { + ANY_NON_NEWLINE_RE.with(|v| self.text_pos.consume(v)); + // type_comment is not supported + } + + // Set start of current token + self.start_pos = (&self.text_pos).into(); + + return match self.text_pos.peek() { + // Check for EOF now + None => { + if self.missing_nl_before_eof && !self.blank_line { + self.at_bol = true; + self.missing_nl_before_eof = false; + Ok(TokType::Newline) + } else { + let hanging_indents = self.indent_stack.len() as i32; + if self.pending_indents == 0 && hanging_indents != 0 { + // We've reached EOF but there are still pending indents not + // accounted for. Flush them out. + self.pending_indents = -hanging_indents; + self.indent_stack.clear(); + self.alt_indent_stack.clear(); + self.missing_nl_before_eof = false; + } + if let Some(t) = self.process_pending_indents() { + Ok(t) + } else { + Ok(TokType::EndMarker) + } + } + } + + // Identifier (most frequent token!) 
+ Some('a'..='z') | Some('A'..='Z') | Some('_') | Some('\u{80}'..=MAX_CHAR) => { + self.consume_identifier_or_prefixed_string() + } + + // Newline + Some('\n') => { + self.text_pos.next(); + self.at_bol = true; + if self.split_ftstring + && self + .ftstring_stack + .last() + .map(|node| node.allow_multiline()) + == Some(false) + { + Err(TokError::UnterminatedString) + } else if self.blank_line || !self.paren_stack.is_empty() { + // this newline doesn't count + // recurse (basically `goto nextline`) + self.next_inner() + } else { + self.cont_line = false; + if self.async_def { + self.async_def_nl = true; + } + Ok(TokType::Newline) + } + } + + // Ellipsis + Some('.') if self.text_pos.consume("...") => { + return Ok(TokType::Op); + } + + // Number starting with period + Some('.') if DECIMAL_DOT_DIGIT_RE.with(|r| self.text_pos.matches(r)) => { + self.consume_number(NumberState::Fraction) + } + + // Dot + Some('.') => { + self.text_pos.next(); + Ok(TokType::Op) + } + + // Number + Some('0'..='9') => self.consume_number(NumberState::StartDigit), + + // String + Some('\'') | Some('"') => self.consume_string(), + + // Line continuation + Some('\\') => { + self.text_pos.next(); + if let Some('\n') = self.text_pos.next() { + if self.text_pos.peek() == None { + Err(TokError::LineContinuationEof) + } else { + self.cont_line = true; + // Read next line + continue 'again; + } + } else { + Err(TokError::LineContinuation) + } + } + + Some(ch @ '(') | Some(ch @ '[') | Some(ch @ '{') => { + self.text_pos.next(); + if let Some(tos) = self.ftstring_stack.last_mut() { + tos.open_parentheses(); + } + self.paren_stack.push((ch, self.text_pos.line_number())); + Ok(TokType::Op) + } + + Some(closing @ ')') | Some(closing @ ']') | Some(closing @ '}') => { + self.text_pos.next(); + if let Some(tos) = self.ftstring_stack.last_mut() { + tos.close_parentheses(); + } + if let Some((opening, line_number)) = self.paren_stack.pop() { + match (opening, closing) { + ('(', ')') | ('[', ']') | ('{', 
'}') => Ok(TokType::Op), + _ => { + if line_number != self.text_pos.line_number() { + Err(TokError::MismatchedClosingParenOnLine( + opening, + closing, + line_number, + )) + } else { + Err(TokError::MismatchedClosingParen(opening, closing)) + } + } + } + } else { + Err(TokError::UnmatchedClosingParen(closing)) + } + } + + Some(':') + if self + .ftstring_stack + .last() + .map(|tos| tos.parentheses_count - tos.format_spec_count == 1) + .unwrap_or(false) => + { + // N.B. This may capture the walrus operator and pass it to the formatter. + // That's intentional. PEP 572 says: "Assignment expressions inside of f-strings + // require parentheses." + // + // >>> f'{x:=10}' # Valid, passes '=10' to formatter + let tos = self + .ftstring_stack + .last_mut() + .expect("ftstring_stack is not empty"); + tos.format_spec_count += 1; + self.text_pos.next(); + Ok(TokType::Op) + } + + // Operator + Some(_) if OPERATOR_RE.with(|r| self.text_pos.consume(r)) => Ok(TokType::Op), + + // Bad character + // If nothing works, fall back to this error. CPython returns an OP in this case, + // and then just relies on the parser to generate a generic syntax error. + Some(ch) => Err(TokError::BadCharacter(ch)), + }; + } + } + + /// Consumes the whitespace (and comments) at the beginning of the line. May emit an error. Will + /// mutate `pending_indents`, so you must check `pending_indents` after calling this. + fn consume_bol_whitespace(&mut self) -> Result<(), TokError<'t>> { + self.blank_line = false; + if !self.at_bol { + return Ok(()); + } + + let mut col = 0; // column where tab counts as 8 characters + let mut altcol = 0; // column where tab counts as 1 character + self.at_bol = false; + self.bol_width = 0; + + // consume space, tab, and formfeed characters + loop { + match self.text_pos.peek() { + Some(' ') => { + col += 1; + altcol += 1; + self.bol_width += 1; + self.text_pos.next(); + } + Some('\t') => { + // Increment both col and altcol using different tab sizes. 
Tabs snap to the + // next multiple of self.tab_size. + col = (col / self.tab_size + 1) * self.tab_size; + // altcol will later be used for detecting mixed tabs and spaces. + altcol = (altcol / self.alt_tab_size + 1) * self.alt_tab_size; + self.bol_width += 1; + self.text_pos.next(); + } + // Control-L (formfeed) for emacs users + Some('\x0c') => { + col = 0; + altcol = 0; + self.bol_width += 1; + self.text_pos.next(); + } + _ => { + break; + } + } + } + + // Lines with only whitespace and/or comments and/or a line continuation + // character shouldn't affect the indentation and are not passed to the parser + // as NEWLINE tokens. + self.blank_line = matches!( + self.text_pos.peek(), + Some('#') | Some('\n') | Some('\\') | None + ); + + if self.blank_line || !self.paren_stack.is_empty() { + return Ok(()); + } + + let prev_col = self.indent_stack.last().unwrap_or(&0); + match col.cmp(prev_col) { + Ordering::Equal => { + // No change + if altcol != *self.alt_indent_stack.last().unwrap_or(&0) { + return Err(TokError::TabSpace); + } + } + Ordering::Greater => { + // col > prev_col + // Indent -- always one + if self.indent_stack.len() + 1 >= MAX_INDENT { + return Err(TokError::TooDeep); + } + // col > prev_col, therefore altcol > prev_altcol, unless there's badly mixed tabs + // and spaces + if altcol <= *self.alt_indent_stack.last().unwrap_or(&0) { + return Err(TokError::TabSpace); + } + // only emit indents if we're not at EOF + if self.text_pos.peek().is_some() { + self.pending_indents += 1; + self.indent_stack.push(col); + self.alt_indent_stack.push(altcol); + } + } + Ordering::Less => { + // c < prev_col + // Dedent -- any number, must be consistent + while matches!(self.indent_stack.last(), Some(&ind_cols) if col < ind_cols) { + self.pending_indents -= 1; + self.indent_stack.pop(); + self.alt_indent_stack.pop(); + } + if col != *self.indent_stack.last().unwrap_or(&0) { + return Err(TokError::Dedent); + } + if altcol != *self.alt_indent_stack.last().unwrap_or(&0) 
{ + return Err(TokError::TabSpace); + } + } + } + + Ok(()) + } + + fn process_pending_indents(&mut self) -> Option { + if self.pending_indents != 0 { + if self.pending_indents < 0 { + self.pending_indents += 1; + Some(TokType::Dedent) + } else { + self.pending_indents -= 1; + Some(TokType::Indent) + } + } else { + None + } + } + + fn maybe_close_async_def(&mut self) { + // Check if we are closing an async function + if self.async_def + && !self.blank_line + // (This is irrelevant to the rust implementation which doesn't support type_comments + // yet, but the comment is preserved for posterity) + // Due to some implementation artifacts of type comments, a TYPE_COMMENT at the start of + // a function won't set an indentation level and it will produce a NEWLINE after it. To + // avoid spuriously ending an async function due to this, wait until we have some + // non-newline char in front of us. + // && self.text_pos.peek() == Some('\n') + && self.paren_stack.is_empty() + // There was a NEWLINE after ASYNC DEF, so we're past the signature. + && self.async_def_nl + // Current indentation level is less than where the async function was defined + && self.async_def_indent >= self.indent_stack.len() + { + self.async_def = false; + self.async_def_indent = 0; + self.async_def_nl = false; + } + } + + fn consume_identifier_or_prefixed_string(&mut self) -> Result> { + // Process the various legal combinations of b"", r"", u"",f"", and t"". + if STRING_PREFIX_RE.with(|r| self.text_pos.consume(r)) { + if let Some('"') | Some('\'') = self.text_pos.peek() { + // We found a string, not an identifier. Bail! 
+ if self.split_ftstring { + let res = match self + .text_pos + .slice_from_start_pos(&self.start_pos) + .chars() + .find(|c| matches!(c, 'f' | 'F' | 't' | 'T')) + { + Some('f' | 'F') => Some(FTStringType::FString), + Some('t' | 'T') => Some(FTStringType::TString), + _ => None, + }; + if let Some(str_type) = res { + // Consume the prefix and return the start token + return self.consume_prefixed_string_start(str_type); + } + } + return self.consume_string(); + } + } else { + // the next character must be a potential identifier start, aka `[a-zA-Z_]|[^\x00-\x7f]` + let first_ch = self.text_pos.next(); + debug_assert!(matches!( + first_ch, + Some('a'..='z') | Some('A'..='Z') | Some('_') | Some('\u{80}'..=MAX_CHAR) + )); + } + POTENTIAL_IDENTIFIER_TAIL_RE.with(|r| self.text_pos.consume(r)); + let identifier_str = self.text_pos.slice_from_start_pos(&self.start_pos); + if !verify_identifier(identifier_str) { + // TODO: async/await + return Err(TokError::BadIdentifier(identifier_str)); + } + + let allow_async = !self.async_hacks || self.async_def; + match (identifier_str, allow_async) { + ("async", true) => Ok(TokType::Async), + ("await", true) => Ok(TokType::Await), + ("async", false) => { + // The current token is 'async' and async_hacks is enabled. + // Look ahead one token to see if that is 'def'. + // This clone is expensive, but modern code doesn't need async_hacks. + let mut lookahead_state = self.clone(); + if lookahead_state.next_inner() == Ok(TokType::Name) + && lookahead_state + .text_pos + .slice_from_start_pos(&lookahead_state.start_pos) + == "def" + { + self.async_def = true; + self.async_def_indent = self.indent_stack.len(); + Ok(TokType::Async) + } else { + Ok(TokType::Name) + } + } + _ => Ok(TokType::Name), + } + } + + fn consume_number(&mut self, state: NumberState) -> Result> { + // This is organized as a state machine. The match could also be rewritten into multiple + // functions, but this is closer to how the C code is written (with gotos). 
+ match state { + NumberState::StartDigit => { + let start_digit_ch = self.text_pos.peek(); + debug_assert!(is_digit(start_digit_ch)); + + if start_digit_ch == Some('0') { + self.text_pos.next(); + match self.text_pos.peek() { + Some('x') | Some('X') => { + self.text_pos.next(); + if !HEXADECIMAL_TAIL_RE.with(|r| self.text_pos.consume(r)) + || self.text_pos.peek() == Some('_') + { + Err(TokError::BadHexadecimal) + } else { + Ok(TokType::Number) + } + } + Some('o') | Some('O') => { + self.text_pos.next(); + if !OCTAL_TAIL_RE.with(|r| self.text_pos.consume(r)) + || self.text_pos.peek() == Some('_') + { + return Err(TokError::BadOctal); + } + if let Some(next_ch) = self.text_pos.peek() { + if is_digit(next_ch) { + return Err(TokError::BadOctalDigit(next_ch)); + } + } + Ok(TokType::Number) + } + Some('b') | Some('B') => { + self.text_pos.next(); + if !BINARY_TAIL_RE.with(|r| self.text_pos.consume(r)) + || self.text_pos.peek() == Some('_') + { + return Err(TokError::BadBinary); + } + if let Some(next_ch) = self.text_pos.peek() { + if is_digit(next_ch) { + return Err(TokError::BadBinaryDigit(next_ch)); + } + } + Ok(TokType::Number) + } + _ => { + let mut nonzero = false; + // Maybe old-style octal. 
In any case, allow '0' as a literal + loop { + if self.text_pos.peek() == Some('_') { + self.text_pos.next(); + if !is_digit(self.text_pos.peek()) { + return Err(TokError::BadDecimal); + } + } + if self.text_pos.peek() != Some('0') { + break; + } + self.text_pos.next(); + } + if is_digit(self.text_pos.peek()) { + nonzero = true; + self.consume_decimal_tail()?; + } + if self.text_pos.peek() == Some('.') { + self.consume_number(NumberState::Fraction) + } else if let Some('e') | Some('E') = self.text_pos.peek() { + self.consume_number(NumberState::Exponent) + } else if let Some('j') | Some('J') = self.text_pos.peek() { + self.consume_number(NumberState::Imaginary) + } else if nonzero { + Err(TokError::BadDecimalLeadingZeros) + } else { + Ok(TokType::Number) + } + } + } + } else { + self.consume_decimal_tail()?; + if self.text_pos.peek() == Some('.') { + self.consume_number(NumberState::Fraction) + } else if let Some('e') | Some('E') = self.text_pos.peek() { + self.consume_number(NumberState::Exponent) + } else if let Some('j') | Some('J') = self.text_pos.peek() { + self.consume_number(NumberState::Imaginary) + } else { + Ok(TokType::Number) + } + } + } + NumberState::Fraction => { + let dot_ch = self.text_pos.next(); + debug_assert!(dot_ch == Some('.')); + + if is_digit(self.text_pos.peek()) { + self.consume_decimal_tail()?; + } + if let Some('e') | Some('E') = self.text_pos.peek() { + self.consume_number(NumberState::Exponent) + } else if let Some('j') | Some('J') = self.text_pos.peek() { + self.consume_number(NumberState::Imaginary) + } else { + Ok(TokType::Number) + } + } + NumberState::Exponent => { + let e_ch = self.text_pos.next(); + debug_assert!(matches!(e_ch, Some('e') | Some('E'))); + + if let Some('+') | Some('-') = self.text_pos.peek() { + self.text_pos.next(); + if !is_digit(self.text_pos.peek()) { + return Err(TokError::BadDecimal); + } + } else if !is_digit(self.text_pos.peek()) { + // Don't consume the 'e'. 
It could be part of an identifier after this number. + self.text_pos.backup_no_newline(); + return Ok(TokType::Number); + } + self.consume_decimal_tail()?; + if let Some('j') | Some('J') = self.text_pos.peek() { + self.consume_number(NumberState::Imaginary) + } else { + Ok(TokType::Number) + } + } + NumberState::Imaginary => { + let j_ch = self.text_pos.next(); + debug_assert!(matches!(j_ch, Some('j') | Some('J'))); + + Ok(TokType::Number) + } + } + } + + /// Processes a decimal tail. This is the bit after the dot or after an E in a float. + fn consume_decimal_tail(&mut self) -> Result<(), TokError<'t>> { + let result = DECIMAL_TAIL_RE.with(|r| self.text_pos.consume(r)); + // Assumption: If we've been called, the first character is an integer, so we must have a + // regex match + debug_assert!(result, "try_decimal_tail was called on a non-digit char"); + if self.text_pos.peek() == Some('_') { + Err(TokError::BadDecimal) + } else { + Ok(()) + } + } + + fn consume_open_quote(&mut self) -> (StringQuoteChar, StringQuoteSize) { + let quote_char: StringQuoteChar = self + .text_pos + .peek() + .try_into() + .expect("the next character must be a quote when calling consume_open_quote"); + let triple_quote_pattern = quote_char.triple_str(); + let quote_size = if self.text_pos.consume(triple_quote_pattern) { + StringQuoteSize::Triple + } else { + self.text_pos.next(); // consume the single character instead + StringQuoteSize::Single + }; + (quote_char, quote_size) + } + + fn consume_string(&mut self) -> Result> { + // Assumption: The opening quote has not been consumed. Leading characters (b, r, f, etc) + // have been consumed. 
+ let (quote_char, quote_size) = self.consume_open_quote(); + let quote_raw = quote_char.into(); + + let mut end_quote_size: usize = 0; + let quote_usize: usize = quote_size.into(); + while end_quote_size != quote_usize { + match (self.text_pos.next(), quote_size) { + (None, StringQuoteSize::Triple) => { + return Err(TokError::UnterminatedTripleQuotedString); + } + (None, StringQuoteSize::Single) | (Some('\n'), StringQuoteSize::Single) => { + return Err(TokError::UnterminatedString); + } + (ch @ Some('\''), _) | (ch @ Some('"'), _) if ch == Some(quote_raw) => { + end_quote_size += 1; + } + (Some(ch), _) => { + end_quote_size = 0; + if ch == '\\' { + // skip escaped char + self.text_pos.next(); + } + } + } + } + + Ok(TokType::String) + } + + fn consume_prefixed_string_start( + &mut self, + str_type: FTStringType, + ) -> Result> { + // Consumes everything after the (f|t) but before the actual string. + let (quote_char, quote_size) = self.consume_open_quote(); + let is_raw_string = self + .text_pos + .slice_from_start_pos(&self.start_pos) + .contains(&['r', 'R'][..]); + self.ftstring_stack.push(FTStringNode::new( + quote_char, + quote_size, + is_raw_string, + str_type.clone(), + )); + + match str_type { + FTStringType::FString => Ok(TokType::FStringStart), + FTStringType::TString => Ok(TokType::TStringStart), + } + } + + fn maybe_consume_ftstring_string( + &mut self, + is_in_format_spec: bool, + is_raw_string: bool, + ) -> Result, TokError<'t>> { + let allow_multiline = self + .ftstring_stack + .last() + .map(|node| node.allow_multiline()) + == Some(true); + let str_type = self + .ftstring_stack + .last() + .map(|node| node.string_type.clone()); + let mut in_named_unicode: bool = false; + let mut ok_result = Ok(None); // value to return if we reach the end and don't error out + 'outer: loop { + match (self.text_pos.peek(), allow_multiline) { + (None, true) => { + return Err(TokError::UnterminatedTripleQuotedString); + } + (None, false) | (Some('\n'), false) => { + 
return Err(TokError::UnterminatedString); + } + (ch @ Some('\''), _) | (ch @ Some('"'), _) => { + // see if this actually terminates the most recent fstring + if let Some(node) = self.ftstring_stack.last() { + if ch == Some(node.quote_char.into()) { + match node.quote_size { + StringQuoteSize::Single => { + break 'outer; + } + StringQuoteSize::Triple => { + if self.text_pos.matches(node.quote_char.triple_str()) { + break 'outer; + } + } + } + } + } + self.text_pos.next(); + } + (Some('\\'), _) if !is_raw_string => { + self.text_pos.next(); + if is_in_format_spec { + if let Some('{') | Some('}') = self.text_pos.peek() { + // don't consume { or } because we want those to be interpreted as OP + // tokens + } else { + // skip escaped char (e.g. \', \", or newline/line continuation) + self.text_pos.next(); + } + } else if let Some( + '\n' + | '\\' + | '\'' + | '"' + | 'a' + | 'b' + | 'f' + | 'n' + | 'r' + | 't' + | 'v' + | 'x' + | '0'..='9' + | 'N' + | 'u' + | 'U', + ) = self.text_pos.peek() + { + // skip escaped char + let next_ch = self.text_pos.next(); + // check if this is a \N sequence + if let Some('N') = next_ch { + // swallow the next open curly brace if it exists + if let Some('{') = self.text_pos.peek() { + in_named_unicode = true; + self.text_pos.next(); + } + } + } + } + (Some('\\'), _) if is_raw_string => { + self.text_pos.next(); + // skip escaped end-of-string marker or backslash + if let Some('"' | '\'' | '\\') = self.text_pos.peek() { + self.text_pos.next(); + } + } + (Some('{'), _) => { + if is_in_format_spec { + // don't actually consume the {, and generate an OP for it instead + break 'outer; + } + let consumed_double = self.text_pos.consume("{{"); + if !consumed_double { + break 'outer; + } + } + (Some('}'), _) => { + if in_named_unicode { + in_named_unicode = false; + self.text_pos.next(); + } else if is_in_format_spec { + // don't actually consume the }, and generate an OP for it instead + break 'outer; + } else if !self.text_pos.consume("}}") { + 
return Err(TokError::UnmatchedClosingParen('}')); + } + } + _ => { + self.text_pos.next(); + } + } + ok_result = match str_type { + Some(FTStringType::FString) => Ok(Some(TokType::FStringString)), + Some(FTStringType::TString) => Ok(Some(TokType::TStringString)), + None => unreachable!("We should always have a string type"), + }; + } + ok_result + } + + fn maybe_consume_ftstring_end(&mut self) -> Option { + let ch = self.text_pos.peek(); + if let Some(node) = self.ftstring_stack.last() { + if ch == Some(node.quote_char.into()) { + if node.quote_size == StringQuoteSize::Triple { + self.text_pos.consume(node.quote_char.triple_str()); + } else { + self.text_pos.next(); // already matched + } + let tok_type = match node.string_type { + FTStringType::FString => TokType::FStringEnd, + FTStringType::TString => TokType::TStringEnd, + }; + self.ftstring_stack.pop(); + return Some(tok_type); + } + } + None + } +} + +impl<'t> Iterator for TokState<'t> { + type Item = Result>; + + /// Returns the next token type. + fn next(&mut self) -> Option>> { + // This implementation wraps `next_inner`, which does the actual work. + if self.done { + None + } else { + match self.next_inner() { + Err(err) => { + self.done = true; + Some(Err(err)) + } + Ok(TokType::EndMarker) => { + self.done = true; + Some(Ok(TokType::EndMarker)) + } + Ok(t) => Some(Ok(t)), + } + } + } +} + +/// Returns true if the given string is a valid Python 3.x identifier. Follows [PEP 3131][]. +/// +/// [PEP 3131]: https://www.python.org/dev/peps/pep-3131/ +fn verify_identifier(name: &str) -> bool { + // TODO: If `name` is non-ascii, must first normalize name to NFKC. + // Common case: If the entire string is ascii, we can avoid the more expensive regex check, + // since the tokenizer already validates ascii characters before calling us. 
+ name.is_ascii() || UNICODE_IDENTIFIER_RE.with(|r| r.is_match(name)) +} + +#[derive(Clone)] +pub struct Token<'a> { + pub r#type: TokType, + pub string: &'a str, + pub start_pos: TextPositionSnapshot, + pub end_pos: TextPositionSnapshot, + pub whitespace_before: Rc>>, + pub whitespace_after: Rc>>, + pub relative_indent: Option<&'a str>, +} + +impl<'a> Debug for Token<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + write!( + f, + "Token({:?}, {}, start={:?}, end={:?}, relative_indent={:?}, ws_before={:?}, ws_after={:?}", + self.r#type, self.string, self.start_pos, self.end_pos, self.relative_indent, self.whitespace_before, self.whitespace_after + ) + } +} + +// Dummy Eq implementation. We never compare Tokens like this +impl<'a> PartialEq for Token<'a> { + fn eq(&self, _other: &Self) -> bool { + true + } +} + +impl<'a> Eq for Token<'a> {} + +pub struct TokenIterator<'a> { + previous_whitespace: Option>>>, + core_state: TokState<'a>, + absolute_indents: Vec<&'a str>, +} + +impl<'a> TokenIterator<'a> { + pub fn new(module_text: &'a str, config: &TokConfig) -> Self { + Self { + previous_whitespace: None, + absolute_indents: vec![], + core_state: TokState::new(module_text, config), + } + } +} + +impl<'a> Iterator for TokenIterator<'a> { + type Item = Result, TokError<'a>>; + + fn next(&mut self) -> Option { + let next = self.core_state.next(); + next.as_ref()?; + Some((|| { + let tok_type = next.unwrap()?; + let relative_indent = match tok_type { + TokType::Indent => { + let end_idx = self.core_state.text_pos.byte_idx(); + let start_idx = end_idx - self.core_state.bol_width; + let absolute_indent = &self.core_state.text_pos.text()[start_idx..end_idx]; + let relative_indent = + if let Some(prev_absolute_indent) = self.absolute_indents.last() { + if let Some(ri) = absolute_indent.strip_prefix(prev_absolute_indent) { + ri + } else { + // TODO: return the correct exception type, improve error message + return Err(TokError::Dedent); + } + } 
else { + // there's no previous indent, absolute_indent is relative_indent + absolute_indent + }; + self.absolute_indents.push(absolute_indent); + // HACKY: mutate and fixup the previous whitespace state + if let Some(ws) = self.previous_whitespace.as_mut() { + ws.borrow_mut().absolute_indent = absolute_indent; + } + Some(relative_indent) + } + TokType::Dedent => { + self.absolute_indents.pop(); + // HACKY: mutate and fixup the previous whitespace state + if let Some(ws) = self.previous_whitespace.as_mut() { + ws.borrow_mut().absolute_indent = + self.absolute_indents.last().unwrap_or(&""); + } + None + } + _ => None, + }; + let text_pos = &self.core_state.text_pos; + let whitespace_before = self.previous_whitespace.clone().unwrap_or_default(); + let whitespace_after = match tok_type { + TokType::Indent | TokType::Dedent | TokType::EndMarker => whitespace_before.clone(), + _ => Rc::new(RefCell::new(WhitespaceState { + line: text_pos.line_number(), + column: text_pos.char_column_number(), + column_byte: text_pos.byte_column_number(), + byte_offset: text_pos.byte_idx(), + absolute_indent: self.absolute_indents.last().unwrap_or(&""), + is_parenthesized: self.core_state.is_parenthesized(), + })), + }; + self.previous_whitespace = Some(whitespace_after.clone()); + + Ok(Token { + r#type: tok_type, + string: text_pos.slice_from_start_pos(&self.core_state.start_pos), + start_pos: self.core_state.start_pos.clone(), + end_pos: text_pos.into(), + whitespace_after: whitespace_after.clone(), + whitespace_before: whitespace_before.clone(), + relative_indent, + }) + })()) + } +} diff --git a/native/libcst/src/tokenizer/core/string_types.rs b/native/libcst/src/tokenizer/core/string_types.rs new file mode 100644 index 00000000..09a51851 --- /dev/null +++ b/native/libcst/src/tokenizer/core/string_types.rs @@ -0,0 +1,128 @@ +// This implementation is Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// CPython 3.10.0a5 and the original C code this is based on is +// Copyright (c) 2001-2021 Python Software Foundation; All Rights Reserved +// +// Portions of this module (f-string splitting) are based on parso's tokenize.py, which is also PSF +// licensed. + +/// Helper types for string processing in the core tokenizer. +use std::convert::TryFrom; + +use crate::tokenizer::text_position::TextPositionSnapshot; + +#[derive(Clone, Copy, Eq, PartialEq)] +pub enum StringQuoteSize { + Single, + Triple, +} + +impl From for usize { + fn from(qs: StringQuoteSize) -> Self { + match qs { + StringQuoteSize::Single => 1, + StringQuoteSize::Triple => 3, + } + } +} + +#[derive(Clone, Copy)] +pub enum StringQuoteChar { + Apostrophe, + DoubleQuote, +} + +impl StringQuoteChar { + pub fn triple_str(&self) -> &'static str { + match self { + Self::Apostrophe => "'''", + Self::DoubleQuote => "\"\"\"", + } + } +} + +impl From for char { + fn from(ch: StringQuoteChar) -> Self { + match ch { + StringQuoteChar::Apostrophe => '\'', + StringQuoteChar::DoubleQuote => '"', + } + } +} + +#[derive(Debug, thiserror::Error)] +#[error("{0:?} is not a valid string quote character")] +pub struct StringQuoteCharConversionError(Option); + +impl TryFrom> for StringQuoteChar { + type Error = StringQuoteCharConversionError; + + fn try_from(ch: Option) -> Result { + match ch { + Some('\'') => Ok(StringQuoteChar::Apostrophe), + Some('"') => Ok(StringQuoteChar::DoubleQuote), + _ => Err(StringQuoteCharConversionError(ch)), + } + } +} + +#[derive(Clone)] +pub enum FTStringType { + FString, + TString, +} + +#[derive(Clone)] +pub struct FTStringNode { + pub quote_char: StringQuoteChar, + pub quote_size: StringQuoteSize, + pub parentheses_count: usize, + pub string_start: Option, + // In the syntax there can be multiple format_spec's nested: {x:{y:3}} + pub format_spec_count: usize, + pub is_raw_string: bool, + // ftstring type; either f-string or a t-string + pub string_type: FTStringType, +} + +impl 
FTStringNode { + pub fn new( + quote_char: StringQuoteChar, + quote_size: StringQuoteSize, + is_raw_string: bool, + string_type: FTStringType, + ) -> Self { + Self { + quote_char, + quote_size, + parentheses_count: 0, + string_start: None, + format_spec_count: 0, + is_raw_string, + string_type, + } + } + + pub fn open_parentheses(&mut self) { + self.parentheses_count += 1; + } + + pub fn close_parentheses(&mut self) { + if self.is_in_format_spec() { + self.format_spec_count -= 1; + } + self.parentheses_count -= 1; + } + + pub fn allow_multiline(&self) -> bool { + self.quote_size == StringQuoteSize::Triple || self.is_in_expr() + } + + pub fn is_in_expr(&self) -> bool { + self.parentheses_count > self.format_spec_count + } + + pub fn is_in_format_spec(&self) -> bool { + !self.is_in_expr() && self.format_spec_count > 0 + } +} diff --git a/native/libcst/src/tokenizer/debug_utils.rs b/native/libcst/src/tokenizer/debug_utils.rs new file mode 100644 index 00000000..1e476a47 --- /dev/null +++ b/native/libcst/src/tokenizer/debug_utils.rs @@ -0,0 +1,16 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use std::fmt; + +/// An empty struct that when writes "..." when using `fmt::Debug`. Useful for omitting fields when +/// using `fmt::Formatter::debug_struct`. +pub struct EllipsisDebug; + +impl fmt::Debug for EllipsisDebug { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("...") + } +} diff --git a/native/libcst/src/tokenizer/mod.rs b/native/libcst/src/tokenizer/mod.rs new file mode 100644 index 00000000..9f7bbe2c --- /dev/null +++ b/native/libcst/src/tokenizer/mod.rs @@ -0,0 +1,15 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +mod core; +mod debug_utils; +mod operators; +mod text_position; +pub mod whitespace_parser; + +pub use self::core::*; + +#[cfg(test)] +mod tests; diff --git a/native/libcst/src/tokenizer/operators.rs b/native/libcst/src/tokenizer/operators.rs new file mode 100644 index 00000000..51352900 --- /dev/null +++ b/native/libcst/src/tokenizer/operators.rs @@ -0,0 +1,86 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. +// +// Part of this file is derived from the CPython documentation, which is available under the +// zero-clause BSD license. That license does not require that derivative works cite the original +// code or that we retain the original work's copyright information. +// https://docs.python.org/3/license.html#zero-clause-bsd-license-for-code-in-the-python-release-documentation + +use regex::Regex; + +/// A list of strings that make up all the possible operators in a specific version of Python. +/// Derived from the [CPython's token documentation](https://docs.python.org/3/library/token.html). 
+pub const OPERATORS: &[&str] = &[ + "(", // LPAR + ")", // RPAR + "[", // LSQB + "]", // RSQB + ":", // COLON + ",", // COMMA + ";", // SEMI + "+", // PLUS + "-", // MINUS + "*", // STAR + "/", // SLASH + "|", // VBAR + "&", // AMPER + "<", // LESS + ">", // GREATER + "=", // EQUAL + ".", // DOT + "%", // PERCENT + "{", // LBRACE + "}", // RBRACE + "==", // EQEQUAL + "!=", // NOTEQUAL + "<=", // LESSEQUAL + ">=", // GREATEREQUAL + "~", // TILDE + "^", // CIRCUMFLEX + "<<", // LEFTSHIFT + ">>", // RIGHTSHIFT + "**", // DOUBLESTAR + "+=", // PLUSEQUAL + "-=", // MINEQUAL + "*=", // STAREQUAL + "/=", // SLASHEQUAL + "%=", // PERCENTEQUAL + "&=", // AMPEREQUAL + "|=", // VBAREQUAL + "^=", // CIRCUMFLEXEQUAL + "<<=", // LEFTSHIFTEQUAL + ">>=", // RIGHTSHIFTEQUAL + "**=", // DOUBLESTAREQUAL + "//", // DOUBLESLASH + "//=", // DOUBLESLASHEQUAL + "@", // AT + "@=", // ATEQUAL + "->", // RARROW + "...", // ELLIPSIS + ":=", // COLONEQUAL + // Not a real operator, but needed to support the split_ftstring feature + "!", + // The fake operator added by PEP 401. Technically only valid if used with: + // + // from __future__ import barry_as_FLUFL + "<>", +]; + +thread_local! { +pub static OPERATOR_RE: Regex = { + // sort operators so that we try to match the longest ones first + let mut sorted_operators: Box<[&str]> = OPERATORS.into(); + sorted_operators.sort_unstable_by_key(|op| usize::MAX - op.len()); + Regex::new(&format!( + r"\A({})", + sorted_operators + .iter() + .map(|op| regex::escape(op)) + .collect::>() + .join("|") + )) + .expect("regex") +}; +} diff --git a/native/libcst/src/tokenizer/tests.rs b/native/libcst/src/tokenizer/tests.rs new file mode 100644 index 00000000..20188f47 --- /dev/null +++ b/native/libcst/src/tokenizer/tests.rs @@ -0,0 +1,919 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +/// Tests for the functionality in `tokenize::core`. These tests are not part of the `core` module +/// because they're not a derivative work of CPython, and are therefore not subject to the PSF +/// license. +use crate::tokenizer::core::{TokConfig, TokError, TokState, TokType}; + +fn default_config() -> TokConfig { + TokConfig { + async_hacks: false, + split_ftstring: false, + } +} + +fn tokenize_with_end_marker<'t>( + text: &'t str, + config: &TokConfig, +) -> Result, TokError<'t>> { + let mut result = Vec::new(); + let mut state = TokState::new(text, config); + while let Some(tok_type) = state.next() { + result.push(( + tok_type?, + state.text_pos.slice_from_start_pos(&state.start_pos), + )); + } + Ok(result) +} + +fn tokenize_all<'t>( + text: &'t str, + config: &TokConfig, +) -> Result, TokError<'t>> { + let mut result = tokenize_with_end_marker(text, config)?; + // Remove the EndMarker, since it's on every non-error token stream. + assert_eq!(result.pop().expect("EndMarker").0, TokType::EndMarker); + // Also remove fake newline at the end + if let Some((TokType::Newline, "")) = result.last() { + result.pop(); + } + Ok(result) +} + +#[test] +fn test_indentifier() { + assert_eq!( + tokenize_all("test input", &default_config()), + Ok(vec![(TokType::Name, "test"), (TokType::Name, "input")]) + ); + + assert_eq!( + tokenize_all("__with_underscores", &default_config()), + Ok(vec![(TokType::Name, "__with_underscores")]) + ); + + assert_eq!( + tokenize_all("{ends_with_op}", &default_config()), + Ok(vec![ + (TokType::Op, "{"), + (TokType::Name, "ends_with_op"), + (TokType::Op, "}") + ]) + ); + + assert_eq!( + tokenize_all("\u{0100}\u{0101}\u{0102}unicode", &default_config()), + Ok(vec![(TokType::Name, "\u{0100}\u{0101}\u{0102}unicode")]) + ); +} + +#[test] +fn test_async_await() { + // normally async/await are keywords + assert_eq!( + tokenize_all("async await", &default_config()), + Ok(vec![(TokType::Async, "async"), (TokType::Await, "await")]) + ); + + // with 
async_hacks, async/await are handled as identifiers by default + assert_eq!( + tokenize_all( + "async await", + &TokConfig { + async_hacks: true, + ..default_config() + } + ), + Ok(vec![(TokType::Name, "async"), (TokType::Name, "await")]) + ); + + // with async_hacks, async/await are handled as keywords in functions + assert_eq!( + tokenize_all( + "async def fn():\n await foo\nawait bar", + &TokConfig { + async_hacks: true, + ..default_config() + } + ), + Ok(vec![ + // this async is followed by a def, so it's converted to an Async + (TokType::Async, "async"), + (TokType::Name, "def"), + (TokType::Name, "fn"), + (TokType::Op, "("), + (TokType::Op, ")"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + // this await is inside a function, and is converted into an Await + (TokType::Await, "await"), + (TokType::Name, "foo"), + (TokType::Newline, "\n"), + (TokType::Dedent, ""), + // this await is outside the function, and is turned into an identifier + (TokType::Name, "await"), + (TokType::Name, "bar") + ]) + ); +} + +#[test] +fn test_blankline() { + assert_eq!( + tokenize_all("\n \n\t\n\x0c\n\n", &default_config()), + Ok(vec![]) + ); +} + +#[test] +fn test_newline() { + assert_eq!( + tokenize_all("a\nb\rc\r\n", &default_config()), + Ok(vec![ + (TokType::Name, "a"), + (TokType::Newline, "\n"), + (TokType::Name, "b"), + (TokType::Newline, "\r"), + (TokType::Name, "c"), + (TokType::Newline, "\r\n") + ]) + ); +} + +#[test] +fn test_indent_dedent() { + assert_eq!( + tokenize_all("one\n two\n sameindent\n", &default_config()), + Ok(vec![ + (TokType::Name, "one"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "two"), + (TokType::Newline, "\n"), + (TokType::Name, "sameindent"), + (TokType::Newline, "\n"), + (TokType::Dedent, "") + ]) + ); + + assert_eq!( + tokenize_all("one\n two\n \tthree\n", &default_config()), + Ok(vec![ + (TokType::Name, "one"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, 
"two"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "three"), + (TokType::Newline, "\n"), + (TokType::Dedent, ""), + (TokType::Dedent, "") + ]) + ); + + // indentation decreases to a new (smaller) indentation level that wasn't on the stack + assert_eq!( + tokenize_all(" one\n two", &default_config()), + Err(TokError::Dedent), + ); + + // TabSpace error without change in indentation + assert_eq!( + tokenize_all(" one\n\ttwo\n", &default_config()), + Err(TokError::TabSpace), + ); + + // TabSpace error with increase in indentation + assert_eq!( + tokenize_all(" one\n\t\ttwo\n", &default_config()), + Err(TokError::TabSpace), + ); + + // TabSpace error with decrease in indentation + assert_eq!( + tokenize_all(" one\n \ttwo\n\tthree\n", &default_config()), + Err(TokError::TabSpace), + ); + + // this looks like a TabSpace error, but CPython allows it, so we should too + assert!(tokenize_all(" \tone\n\t two\n", &default_config()).is_ok()); +} + +#[test] +fn test_integer_decimal() { + assert_eq!( + tokenize_all("123456789", &default_config()), + Ok(vec![(TokType::Number, "123456789")]) + ); + + assert_eq!( + tokenize_all("1_2_3", &default_config()), + Ok(vec![(TokType::Number, "1_2_3")]) + ); + + // doesn't consume trailing underscores + assert_eq!( + tokenize_all("123_", &default_config()), + Err(TokError::BadDecimal), + ); +} + +#[test] +fn test_integer_leading_zeros() { + assert_eq!( + tokenize_all("000", &default_config()), + Ok(vec![(TokType::Number, "000")]) + ); + + assert_eq!( + tokenize_all("0_0_0", &default_config()), + Ok(vec![(TokType::Number, "0_0_0")]) + ); + + assert_eq!( + tokenize_all("00123", &default_config()), + Err(TokError::BadDecimalLeadingZeros) + ); +} + +#[test] +fn test_integer_hexadecimal() { + assert_eq!( + tokenize_all("0x00Aa12Ff", &default_config()), + Ok(vec![(TokType::Number, "0x00Aa12Ff")]), + ); + + assert_eq!( + tokenize_all("0x_1_2_3", &default_config()), + Ok(vec![(TokType::Number, "0x_1_2_3")]), + ); + + 
assert_eq!( + tokenize_all("0x123_", &default_config()), + Err(TokError::BadHexadecimal), + ); +} + +#[test] +fn test_integer_octal() { + assert_eq!( + tokenize_all("0o001234567", &default_config()), + Ok(vec![(TokType::Number, "0o001234567")]), + ); + + assert_eq!( + tokenize_all("0o_1_2_3", &default_config()), + Ok(vec![(TokType::Number, "0o_1_2_3")]), + ); + + assert_eq!( + tokenize_all("0o123_", &default_config()), + Err(TokError::BadOctal), + ); + + assert_eq!( + tokenize_all("0o789", &default_config()), + Err(TokError::BadOctalDigit('8')), + ); +} + +#[test] +fn test_integer_binary() { + assert_eq!( + tokenize_all("0b00101011", &default_config()), + Ok(vec![(TokType::Number, "0b00101011")]), + ); + + assert_eq!( + tokenize_all("0b_0_1_0_1", &default_config()), + Ok(vec![(TokType::Number, "0b_0_1_0_1")]), + ); + + assert_eq!( + tokenize_all("0b0101_", &default_config()), + Err(TokError::BadBinary), + ); + + assert_eq!( + tokenize_all("0b0123", &default_config()), + Err(TokError::BadBinaryDigit('2')), + ); +} + +#[test] +fn test_fraction() { + // fraction starting with a dot + assert_eq!( + tokenize_all(".5", &default_config()), + Ok(vec![(TokType::Number, ".5")]) + ); + + // fraction starting with a dot using E + assert_eq!( + tokenize_all(".5e9", &default_config()), + Ok(vec![(TokType::Number, ".5e9")]) + ); + + // fraction starting with a dot using J + assert_eq!( + tokenize_all(".5j", &default_config()), + Ok(vec![(TokType::Number, ".5j")]) + ); + + // fraction starting with a zero + assert_eq!( + tokenize_all("0.5", &default_config()), + Ok(vec![(TokType::Number, "0.5")]) + ); + + // fraction starting with a zero using E + assert_eq!( + tokenize_all("0.5e9", &default_config()), + Ok(vec![(TokType::Number, "0.5e9")]) + ); + + // fraction starting with a zero using J + assert_eq!( + tokenize_all("0.5j", &default_config()), + Ok(vec![(TokType::Number, "0.5j")]) + ); + + // fraction with underscores + assert_eq!( + tokenize_all("1_0.2_5", &default_config()), + 
Ok(vec![(TokType::Number, "1_0.2_5")]) + ); + + // underscores after the fraction are an error + assert_eq!( + tokenize_all(".5_", &default_config()), + Err(TokError::BadDecimal), + ); + + // doesn't consume underscores around the dot + assert_eq!( + tokenize_all("1_.25", &default_config()), + Err(TokError::BadDecimal), + ); + + // doesn't consume underscores around the dot + assert_eq!( + tokenize_all("1._25", &default_config()), + Ok(vec![(TokType::Number, "1."), (TokType::Name, "_25")]) + ); +} + +#[test] +fn test_string() { + // empty, single quote + assert_eq!( + tokenize_all("''", &default_config()), + Ok(vec![(TokType::String, "''")]), + ); + + // empty, double quote + assert_eq!( + tokenize_all(r#""""#, &default_config()), + Ok(vec![(TokType::String, r#""""#)]), + ); + + // simple string + assert_eq!( + tokenize_all("'test'", &default_config()), + Ok(vec![(TokType::String, "'test'")]), + ); + + // mixed quotes + assert_eq!( + tokenize_all(r#""test'"#, &default_config()), + Err(TokError::UnterminatedString), + ); + + // single quoted strings can contain double quotes, double quoted strings can contain single + // quotes + assert_eq!( + tokenize_all( + r#"'she said "hey"' "but he'd ignored her""#, + &default_config() + ), + Ok(vec![ + (TokType::String, r#"'she said "hey"'"#), + (TokType::String, r#""but he'd ignored her""#) + ]), + ); + + // escape characters + assert_eq!( + tokenize_all("'a\\b\\c\\d\\e\\'\\f\\g'", &default_config()), + Ok(vec![(TokType::String, "'a\\b\\c\\d\\e\\'\\f\\g'"),]), + ); + + // newline in the middle of a string causes an unterminated string + assert_eq!( + tokenize_all("'first\nsecond'", &default_config()), + Err(TokError::UnterminatedString), + ); + + // newlines can be escaped and are preserved in the output + assert_eq!( + tokenize_all("'first\\\nsecond\\\r\nthird\\\r'", &default_config()), + Ok(vec![(TokType::String, "'first\\\nsecond\\\r\nthird\\\r'"),]), + ); +} + +#[test] +fn test_string_triple_quoted() { + // empty, single 
quote + assert_eq!( + tokenize_all("''''''", &default_config()), + Ok(vec![(TokType::String, "''''''")]), + ); + + // empty, double quote + assert_eq!( + tokenize_all(r#""""""""#, &default_config()), + Ok(vec![(TokType::String, r#""""""""#)]), + ); + + // simple string with newlines + assert_eq!( + tokenize_all("'''\nmulti\rline\r\n'''", &default_config()), + Ok(vec![(TokType::String, "'''\nmulti\rline\r\n'''")]), + ); + + // unterminated string + assert_eq!( + tokenize_all( + "'''hey'there's''quotes'here, but not '' three'", + &default_config() + ), + Err(TokError::UnterminatedTripleQuotedString), + ); +} + +#[test] +fn test_string_prefix() { + // works with double-quoted string + assert_eq!( + tokenize_all(r#"b"""#, &default_config()), + Ok(vec![(TokType::String, r#"b"""#)]), + ); + + // works with triple-quoted string + assert_eq!( + tokenize_all("b'''test'''", &default_config()), + Ok(vec![(TokType::String, "b'''test'''")]), + ); + + // prefix can be capitalized + assert_eq!( + tokenize_all("B'' R'' U'' F''", &default_config()), + Ok(vec![ + (TokType::String, "B''"), + (TokType::String, "R''"), + (TokType::String, "U''"), + (TokType::String, "F''"), + ]), + ); + + // valid prefixes + assert_eq!( + tokenize_all("b'' r'' u'' f'' br'' fr'' rb'' rf''", &default_config()), + Ok(vec![ + (TokType::String, "b''"), + (TokType::String, "r''"), + (TokType::String, "u''"), + (TokType::String, "f''"), + (TokType::String, "br''"), + (TokType::String, "fr''"), + (TokType::String, "rb''"), + (TokType::String, "rf''"), + ]), + ); + + // invalid prefixes + assert_eq!( + tokenize_all("bb'' rr'' uu'' ff'' ur'' ub'' uf'' fb''", &default_config()), + Ok(vec![ + (TokType::Name, "bb"), + (TokType::String, "''"), + (TokType::Name, "rr"), + (TokType::String, "''"), + (TokType::Name, "uu"), + (TokType::String, "''"), + (TokType::Name, "ff"), + (TokType::String, "''"), + (TokType::Name, "ur"), + (TokType::String, "''"), + (TokType::Name, "ub"), + (TokType::String, "''"), + (TokType::Name, 
"uf"), + (TokType::String, "''"), + (TokType::Name, "fb"), + (TokType::String, "''"), + ]), + ); + + // raw string escapes + assert_eq!( + tokenize_all("r'\\''", &default_config()), + Ok(vec![(TokType::String, "r'\\''")]), + ); + assert_eq!( + tokenize_all(r#"r"\"""#, &default_config()), + Ok(vec![(TokType::String, r#"r"\"""#)]), + ); + assert_eq!( + tokenize_all(r#"r'\\'"#, &default_config()), + Ok(vec![(TokType::String, r#"r'\\'"#)]), + ); + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + assert_eq!( + tokenize_all("rf'\\''", &config), + Ok(vec![ + (TokType::FStringStart, "rf'"), + (TokType::FStringString, "\\'"), + (TokType::FStringEnd, "'"), + ]), + ); + assert_eq!( + tokenize_all(r#"rf"\"""#, &config), + Ok(vec![ + (TokType::FStringStart, "rf\""), + (TokType::FStringString, r#"\""#), + (TokType::FStringEnd, "\""), + ]), + ); + assert_eq!( + tokenize_all(r#"rf'\\'"#, &config), + Ok(vec![ + (TokType::FStringStart, "rf'"), + (TokType::FStringString, r#"\\"#), + (TokType::FStringEnd, "'"), + ]), + ); +} + +#[test] +fn test_split_ftstring() { + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + + assert_eq!( + tokenize_all("f''", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::FStringEnd, "'"), + ]), + ); + + assert_eq!( + tokenize_all("f'{value}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::Op, "{"), + (TokType::Name, "value"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'"), + ]), + ); + + assert_eq!( + tokenize_all("f'{{just a string}}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::FStringString, r"{{just a string}}"), + (TokType::FStringEnd, "'"), + ]), + ); + + assert_eq!( + tokenize_all(r"f'\N{Latin Small Letter A}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::FStringString, r"\N{Latin Small Letter A}"), + (TokType::FStringEnd, "'"), + ]), + ); + + // format specifier + assert_eq!( + tokenize_all("f'result: 
{value:{width}.{precision}}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::FStringString, "result: "), + (TokType::Op, "{"), + (TokType::Name, "value"), + (TokType::Op, ":"), + (TokType::Op, "{"), + (TokType::Name, "width"), + (TokType::Op, "}"), + (TokType::FStringString, "."), + (TokType::Op, "{"), + (TokType::Name, "precision"), + (TokType::Op, "}"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'"), + ]), + ); + + // the walrus operator isn't valid unless parenthesized + assert_eq!( + tokenize_all("f'{a := b}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::Op, "{"), + (TokType::Name, "a"), + (TokType::Op, ":"), + (TokType::FStringString, "= b"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'"), + ]), + ); + + // once parenthesized, this is recognized as the walrus operator + assert_eq!( + tokenize_all("f'{(a := b)}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::Op, "{"), + (TokType::Op, "("), + (TokType::Name, "a"), + (TokType::Op, ":="), + (TokType::Name, "b"), + (TokType::Op, ")"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'"), + ]), + ); +} + +#[test] +fn test_fstring_escapes() { + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + assert_eq!( + tokenize_all("f'\\{{\\}}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::FStringString, "\\{{\\}}"), + (TokType::FStringEnd, "'"), + ]) + ); + assert_eq!( + tokenize_all(r#"f"regexp_like(path, '.*\{file_type}$')""#, &config), + Ok(vec![ + (TokType::FStringStart, "f\""), + (TokType::FStringString, "regexp_like(path, '.*\\"), + (TokType::Op, "{"), + (TokType::Name, "file_type"), + (TokType::Op, "}"), + (TokType::FStringString, "$')"), + (TokType::FStringEnd, "\""), + ]) + ); +} + +#[test] +fn test_operator() { + assert_eq!( + tokenize_all("= == * ** **= -> . .. 
...", &default_config()), + Ok(vec![ + (TokType::Op, "="), + (TokType::Op, "=="), + (TokType::Op, "*"), + (TokType::Op, "**"), + (TokType::Op, "**="), + (TokType::Op, "->"), + (TokType::Op, "."), + (TokType::Op, "."), + (TokType::Op, "."), + (TokType::Op, "...") + ]), + ); +} + +#[test] +fn test_fake_newline() { + assert_eq!( + tokenize_with_end_marker("foo", &default_config()), + Ok(vec![ + (TokType::Name, "foo"), + (TokType::Newline, ""), + (TokType::EndMarker, "") + ]) + ); +} + +#[test] +fn test_fake_newline_when_at_bol() { + assert_eq!( + tokenize_with_end_marker("(\n \\\n)", &default_config()), + Ok(vec![ + (TokType::Op, "("), + (TokType::Op, ")"), + (TokType::Newline, ""), + (TokType::EndMarker, "") + ]) + ) +} + +#[test] +fn test_no_fake_newline_for_empty_input() { + assert_eq!( + tokenize_with_end_marker("", &default_config()), + Ok(vec![(TokType::EndMarker, "")]) + ); +} + +#[test] +fn test_no_fake_newline_for_only_whitespaces() { + assert_eq!( + tokenize_with_end_marker(" ", &default_config()), + Ok(vec![(TokType::EndMarker, "")]) + ); +} + +#[test] +fn test_add_dedents_after_fake_newline() { + assert_eq!( + tokenize_with_end_marker("if 1:\n if 2:\n foo", &default_config()), + Ok(vec![ + (TokType::Name, "if"), + (TokType::Number, "1"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "if"), + (TokType::Number, "2"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "foo"), + (TokType::Newline, ""), + (TokType::Dedent, ""), + (TokType::Dedent, ""), + (TokType::EndMarker, "") + ]) + ); +} + +#[test] +fn test_add_dedents_for_dangling_indent() { + assert_eq!( + tokenize_with_end_marker("if 1:\n if 2:\n ", &default_config()), + Ok(vec![ + (TokType::Name, "if"), + (TokType::Number, "1"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "if"), + (TokType::Number, "2"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + 
(TokType::Dedent, ""), + (TokType::EndMarker, "") + ]) + ); +} + +#[test] +fn test_add_dedents_for_dangling_indent_with_comment() { + assert_eq!( + tokenize_with_end_marker("if 1:\n if 2:\n # foo", &default_config()), + Ok(vec![ + (TokType::Name, "if"), + (TokType::Number, "1"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "if"), + (TokType::Number, "2"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Dedent, ""), + (TokType::EndMarker, "") + ]) + ); +} + +#[test] +fn test_inconsistent_indentation_at_eof() { + assert_eq!( + tokenize_all("if 1:\n pass\n ", &default_config()), + Ok(vec![ + (TokType::Name, "if"), + (TokType::Number, "1"), + (TokType::Op, ":"), + (TokType::Newline, "\n"), + (TokType::Indent, ""), + (TokType::Name, "pass"), + (TokType::Newline, "\n"), + (TokType::Dedent, ""), + ]) + ) +} + +#[test] +fn test_nested_f_string_specs() { + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + assert_eq!( + tokenize_all("f'{_:{_:}{_}}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::Op, "{"), + (TokType::Name, "_"), + (TokType::Op, ":"), + (TokType::Op, "{"), + (TokType::Name, "_"), + (TokType::Op, ":"), + (TokType::Op, "}"), + (TokType::Op, "{"), + (TokType::Name, "_"), + (TokType::Op, "}"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'") + ]) + ) +} + +#[test] +fn test_nested_f_strings() { + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + assert_eq!( + tokenize_all("f'{f'{2}'}'", &config), + Ok(vec![ + (TokType::FStringStart, "f'"), + (TokType::Op, "{"), + (TokType::FStringStart, "f'"), + (TokType::Op, "{"), + (TokType::Number, "2"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'") + ]) + ) +} +#[test] +fn test_can_tokenize_t_string_basic() { + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + assert_eq!( + tokenize_all("t'Nothing to see 
here, move along'", &config), + Ok(vec![ + (TokType::TStringStart, "t'"), + (TokType::TStringString, "Nothing to see here, move along"), + (TokType::TStringEnd, "'") + ]) + ) +} +#[test] +fn test_can_tokenize_f_and_t_strings() { + let config = TokConfig { + split_ftstring: true, + ..default_config() + }; + assert_eq!( + tokenize_all("t\"TMiddle{f'FMiddle{t'{2}'}'}\"", &config), + Ok(vec![ + (TokType::TStringStart, "t\""), + (TokType::TStringString, "TMiddle"), + (TokType::Op, "{"), + (TokType::FStringStart, "f'"), + (TokType::FStringString, "FMiddle"), + (TokType::Op, "{"), + (TokType::TStringStart, "t'"), + (TokType::Op, "{"), + (TokType::Number, "2"), + (TokType::Op, "}"), + (TokType::TStringEnd, "'"), + (TokType::Op, "}"), + (TokType::FStringEnd, "'"), + (TokType::Op, "}"), + (TokType::TStringEnd, "\"") + ]) + ) +} diff --git a/native/libcst/src/tokenizer/text_position/char_width.rs b/native/libcst/src/tokenizer/text_position/char_width.rs new file mode 100644 index 00000000..10c65a41 --- /dev/null +++ b/native/libcst/src/tokenizer/text_position/char_width.rs @@ -0,0 +1,329 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use std::str::Chars; + +#[derive(Debug, Eq, PartialEq)] +pub struct CharWidth { + pub byte_width: usize, + pub char_width: usize, + pub character: char, +} + +/// Iterates over characters (unicode codepoints) normalizing `'\r'` and `"\r\n"` to `'\n'`. Also +/// gives the width of each character, but `'\r\n'` is counted as 2 bytes and 2 characters instead +/// of one even after being normalized to '\n'. 
+#[derive(Clone)] +pub struct NewlineNormalizedCharWidths<'t> { + iter: Chars<'t>, + text: &'t str, + idx: usize, +} + +impl<'t> NewlineNormalizedCharWidths<'t> { + pub fn new(text: &'t str) -> Self { + Self { + text, + iter: text.chars(), + idx: 0, + } + } + + pub fn previous(&mut self) -> Option<::Item> { + // This function is called infrequently. + let mut back_iter = self.text[..self.idx].chars(); + let result = match back_iter.next_back() { + // Unlikely: \n, normalization *may* be needed + Some('\n') => { + // Peek at the previous character to see we're a `\r\n` sequence + match back_iter.next_back() { + Some('\r') => Some(CharWidth { + byte_width: '\r'.len_utf8() + '\n'.len_utf8(), + char_width: 2, + character: '\n', + }), + _ => Some(CharWidth { + byte_width: '\n'.len_utf8(), + char_width: 1, + character: '\n', + }), + } + } + // Unlikely: \r, normalization is needed + Some('\r') => Some(CharWidth { + byte_width: '\n'.len_utf8(), + char_width: 1, + character: '\n', + }), + // Common case: Not \r or \n, so no normalization is needed + Some(ch) => Some(CharWidth { + byte_width: ch.len_utf8(), + char_width: 1, + character: ch, + }), + // Unlikely: EOF + None => None, + }; + if let Some(r) = &result { + self.idx -= r.byte_width; + self.iter = self.text[self.idx..].chars(); + } + result + } + + pub fn peek_character(&self) -> Option { + // This function is called very frequently. + // + // We're not using peekable or caching here, since this should be cheap enough on it's own, + // though benchmarking might prove otherwise. + match self.iter.clone().next() { + Some('\r') => Some('\n'), + ch => ch, + } + } +} + +impl<'t> Iterator for NewlineNormalizedCharWidths<'t> { + type Item = CharWidth; + + fn next(&mut self) -> Option { + // This function is called very frequently. + let result = match self.iter.next() { + // Unlikely: \r, normalization is needed + Some('\r') => { + // Peek at the next character to see if it's '\n'. 
+ let mut speculative = self.iter.clone(); + match speculative.next() { + Some('\n') => { + self.iter = speculative; + Some(CharWidth { + byte_width: '\r'.len_utf8() + '\n'.len_utf8(), + char_width: 2, + character: '\n', + }) + } + _ => Some(CharWidth { + byte_width: '\r'.len_utf8(), + char_width: 1, + character: '\n', + }), + } + } + // Common case: Not \r, so no normalization is needed + Some(ch) => Some(CharWidth { + byte_width: ch.len_utf8(), + char_width: 1, + character: ch, + }), + // Unlikely: EOF + None => None, + }; + if let Some(r) = &result { + self.idx += r.byte_width; + } + result + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ascii_no_newlines() { + let mut cw = NewlineNormalizedCharWidths::new("in"); + + // go forward + assert_eq!(cw.peek_character(), Some('i')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 'i' + }) + ); + assert_eq!(cw.peek_character(), Some('n')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 'n' + }) + ); + + // end of text + assert_eq!(cw.peek_character(), None); + assert_eq!(cw.next(), None); + + // go backwards + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 'n' + }) + ); + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 'i' + }) + ); + + // beginning of text + assert_eq!(cw.previous(), None); + + // try going foward again + assert_eq!(cw.peek_character(), Some('i')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 'i' + }) + ); + } + + #[test] + fn test_unicode_no_newlines() { + // "test" with an accented 'e' + let mut cw = NewlineNormalizedCharWidths::new("t\u{00e9}st"); + + // go forward + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 't' + }) + ); + assert_eq!(cw.peek_character(), Some('\u{00e9}')); + assert_eq!( + 
cw.next(), + Some(CharWidth { + byte_width: 2, + char_width: 1, + character: '\u{00e9}' + }) + ); + assert_eq!(cw.peek_character(), Some('s')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 's' + }) + ); + + // go backwards + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 's' + }) + ); + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 2, + char_width: 1, + character: '\u{00e9}' + }) + ); + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: 't' + }) + ); + } + + #[test] + fn test_newlines() { + let mut cw = NewlineNormalizedCharWidths::new("\n\r\r\n"); + + // go forward + assert_eq!(cw.peek_character(), Some('\n')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: '\n' + }) + ); + assert_eq!(cw.peek_character(), Some('\n')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: '\n' + }) + ); + assert_eq!(cw.peek_character(), Some('\n')); + assert_eq!( + cw.next(), + Some(CharWidth { + byte_width: 2, + char_width: 2, + character: '\n' + }) + ); + + // end of text + assert_eq!(cw.peek_character(), None); + assert_eq!(cw.next(), None); + + // go backwards + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 2, + char_width: 2, + character: '\n' + }) + ); + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: '\n' + }) + ); + assert_eq!( + cw.previous(), + Some(CharWidth { + byte_width: 1, + char_width: 1, + character: '\n' + }) + ); + + // beginning of text + assert_eq!(cw.previous(), None); + } + + #[test] + fn test_empty() { + let mut cw = NewlineNormalizedCharWidths::new(""); + assert_eq!(cw.peek_character(), None); + assert_eq!(cw.next(), None); + assert_eq!(cw.previous(), None); + } +} diff --git a/native/libcst/src/tokenizer/text_position/mod.rs 
b/native/libcst/src/tokenizer/text_position/mod.rs new file mode 100644 index 00000000..42a7b682 --- /dev/null +++ b/native/libcst/src/tokenizer/text_position/mod.rs @@ -0,0 +1,355 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +mod char_width; + +use regex::Regex; +use std::fmt; + +use crate::tokenizer::debug_utils::EllipsisDebug; +use char_width::NewlineNormalizedCharWidths; + +pub trait TextPattern { + fn match_len(&self, text: &str) -> Option; +} + +impl TextPattern for &Regex { + // make sure to anchor your regex with \A + fn match_len(&self, text: &str) -> Option { + self.find(text).map(|m| m.end()) + } +} + +impl TextPattern for &str { + // make sure to anchor your regex with \A + fn match_len(&self, text: &str) -> Option { + if text.starts_with(self) { + Some(self.len()) + } else { + None + } + } +} + +// This is Clone, since that's needed to support async_hacks, but you probably don't usually want to +// clone. Use TextPositionSnapshot instead. +#[derive(Clone)] +pub struct TextPosition<'t> { + text: &'t str, + char_widths: NewlineNormalizedCharWidths<'t>, + inner_byte_idx: usize, + inner_char_column_number: usize, + inner_byte_column_number: usize, + inner_line_number: usize, +} + +/// A lightweight immutable version of TextPosition that's slightly +/// cheaper to construct/store. Used for storing the start position of tokens. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct TextPositionSnapshot { + pub inner_byte_idx: usize, + pub inner_char_column_number: usize, + pub inner_line_number: usize, +} + +impl TextPositionSnapshot { + pub fn byte_idx(&self) -> usize { + self.inner_byte_idx + } + + pub fn char_column_number(&self) -> usize { + self.inner_char_column_number + } + + pub fn line_number(&self) -> usize { + self.inner_line_number + } +} + +impl<'t> TextPosition<'t> { + pub fn new(text: &'t str) -> Self { + Self { + text, + char_widths: NewlineNormalizedCharWidths::new(text), + inner_byte_idx: 0, + inner_char_column_number: 0, + inner_byte_column_number: 0, + inner_line_number: 1, + } + } + + /// Peeks at the next character. Similar to `std::iter::Peekable`, but doesn't modify our + /// internal position counters like wrapping this in `Peekable` would. + pub fn peek(&mut self) -> Option<::Item> { + self.char_widths.peek_character() + } + + /// Matches, but does not consume TextPattern. + /// + /// Caution: This does not normalize `'\r'` characters, like `peek()` and `next()` do. + pub fn matches(&self, pattern: P) -> bool { + let rest_of_text = &self.text[self.inner_byte_idx..]; + let match_len = pattern.match_len(rest_of_text); + match match_len { + Some(match_len) => { + assert!( + !rest_of_text[..match_len].contains(|x| x == '\r' || x == '\n'), + "matches pattern must not match a newline", + ); + true + } + None => false, + } + } + + /// Moves the iterator back one character. Panics if a newline is encountered or if we try to + /// back up past the beginning of the text. + pub fn backup_no_newline(&mut self) { + if let Some(cw) = self.char_widths.previous() { + // If we tried to back up across a newline, we'd have to recompute char_column_number, + // which would be expensive, so it's unsupported. 
+ self.inner_char_column_number = self + .inner_char_column_number + .checked_sub(1) + .expect("cannot back up past the beginning of a line."); + self.inner_byte_column_number = self + .inner_byte_column_number + .checked_sub(cw.byte_width) + .expect("cannot back up past the beginning of a line."); + self.inner_byte_idx -= cw.byte_width; + } else { + panic!("Tried to backup past the beginning of the text.") + } + } + + /// Tries to consume the given TextPattern, moving the TextPosition forward. Returns false if no + /// match was found. Does not support newlines. + /// + /// Panics if a newline is consumed as part of the pattern. + pub fn consume(&mut self, pattern: P) -> bool { + let rest_of_text = &self.text[self.inner_byte_idx..]; + if let Some(len) = pattern.match_len(rest_of_text) { + let new_byte_idx = self.inner_byte_idx + len; + // Call next() a bunch of times to advance the character counters. There's no way to + // shortcut this because we don't know how many characters are in a slice of bytes, + // though we could use a faster algorithm that inspects multiple characters at once + // (e.g. SIMD). + while self.inner_byte_idx < new_byte_idx { + // We can't support newline normalization in this API without copying the string, so + // rather than exposing that (potentially dangerous) behavior, panic if it happens. + assert!( + self.next() != Some('\n'), + "consume pattern must not match a newline", + ); + } + // this shouldn't be possible for the provided implementations of TextPattern + debug_assert!( + self.inner_byte_idx == new_byte_idx, + "pattern ended on a non-character boundary", + ); + true + } else { + false + } + } + + pub fn text(&self) -> &'t str { + self.text + } + + pub fn slice_from_start_pos(&self, start_pos: &TextPositionSnapshot) -> &'t str { + &self.text[start_pos.byte_idx()..self.byte_idx()] + } + + /// Returns the number of bytes we've traversed. 
This is useful for Rust code that needs to + /// slice the input source code, since Rust slices operate on bytes and not unicode codepoints. + pub fn byte_idx(&self) -> usize { + self.inner_byte_idx + } + + /// Returns the column number in terms of number of characters (unicode codepoints) past the + /// beginning of the line. Zero-indexed. + pub fn char_column_number(&self) -> usize { + self.inner_char_column_number + } + + pub fn byte_column_number(&self) -> usize { + self.inner_byte_column_number + } + + /// Returns the one-indexed line number. + pub fn line_number(&self) -> usize { + self.inner_line_number + } +} + +impl Iterator for TextPosition<'_> { + type Item = char; + + /// Gets the next character. This has the side-effect of advancing the internal position + /// counters. + fn next(&mut self) -> Option { + if let Some(cw) = self.char_widths.next() { + self.inner_byte_idx += cw.byte_width; + match cw.character { + '\n' => { + self.inner_line_number += 1; + self.inner_char_column_number = 0; + self.inner_byte_column_number = 0; + } + _ => { + self.inner_char_column_number += cw.char_width; + self.inner_byte_column_number += cw.byte_width; + } + } + Some(cw.character) + } else { + None + } + } +} + +impl fmt::Debug for TextPosition<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TextPosition") + .field("text", &EllipsisDebug) + .field("char_widths", &EllipsisDebug) + .field("inner_byte_idx", &self.inner_byte_idx) + .field("inner_char_column_number", &self.inner_char_column_number) + .field("inner_byte_column_number", &self.inner_byte_column_number) + .field("inner_line_number", &self.inner_line_number) + .finish() + } +} + +impl From<&TextPosition<'_>> for TextPositionSnapshot { + fn from(tp: &TextPosition) -> Self { + Self { + inner_byte_idx: tp.inner_byte_idx, + inner_char_column_number: tp.inner_char_column_number, + inner_line_number: tp.inner_line_number, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + 
#[test] + fn test_empty() { + let mut pos = TextPosition::new(""); + assert_eq!(pos.byte_idx(), 0); + assert_eq!(pos.char_column_number(), 0); + assert_eq!(pos.line_number(), 1); + assert_eq!(pos.peek(), None); + assert!(!pos.consume(&Regex::new(r"\Awon't match").unwrap())); + assert!(pos.consume(&Regex::new(r"\A").unwrap())); + assert_eq!(pos.next(), None); + // call next() again to verify that it's fused + assert_eq!(pos.next(), None); + } + + #[test] + fn test_ascii() { + let mut pos = TextPosition::new("abcdefg"); + + assert_eq!(pos.peek(), Some('a')); + assert_eq!(pos.next(), Some('a')); + assert_eq!(pos.byte_idx(), 1); + assert_eq!(pos.char_column_number(), 1); + assert_eq!(pos.line_number(), 1); + + // consume a few characters with a regex + assert!(!pos.consume(&Regex::new(r"\Awon't match").unwrap())); + assert!(pos.consume(&Regex::new(r"\Abcd").unwrap())); + assert_eq!(pos.byte_idx(), 4); + assert_eq!(pos.char_column_number(), 4); + assert_eq!(pos.line_number(), 1); + + // consume the rest of the text + assert_eq!(pos.next(), Some('e')); + assert_eq!(pos.next(), Some('f')); + assert_eq!(pos.next(), Some('g')); + assert_eq!(pos.next(), None); + assert_eq!(pos.byte_idx(), 7); + assert_eq!(pos.char_column_number(), 7); + assert_eq!(pos.line_number(), 1); + } + + #[test] + fn test_unicode() { + let mut pos = TextPosition::new("\u{00e9}abc"); + + assert_eq!(pos.peek(), Some('\u{00e9}')); + assert_eq!(pos.next(), Some('\u{00e9}')); + } + + #[test] + fn test_newline_lf() { + let mut pos = TextPosition::new("ab\nde"); + + assert_eq!(pos.next(), Some('a')); + assert_eq!(pos.next(), Some('b')); + assert_eq!(pos.line_number(), 1); + assert_eq!(pos.char_column_number(), 2); + + assert_eq!(pos.next(), Some('\n')); + assert_eq!(pos.line_number(), 2); + assert_eq!(pos.char_column_number(), 0); + + assert_eq!(pos.next(), Some('d')); + assert_eq!(pos.next(), Some('e')); + assert_eq!(pos.next(), None); + assert_eq!(pos.line_number(), 2); + 
assert_eq!(pos.char_column_number(), 2); + + assert_eq!(pos.byte_idx(), 5); + } + + #[test] + fn test_newline_cr() { + let mut pos = TextPosition::new("ab\rde"); + + assert_eq!(pos.next(), Some('a')); + assert_eq!(pos.next(), Some('b')); + assert_eq!(pos.line_number(), 1); + assert_eq!(pos.char_column_number(), 2); + + assert_eq!(pos.next(), Some('\n')); + assert_eq!(pos.line_number(), 2); + assert_eq!(pos.char_column_number(), 0); + + assert_eq!(pos.next(), Some('d')); + assert_eq!(pos.next(), Some('e')); + assert_eq!(pos.next(), None); + assert_eq!(pos.line_number(), 2); + assert_eq!(pos.char_column_number(), 2); + + assert_eq!(pos.byte_idx(), 5); + } + + #[test] + fn test_newline_cr_lf() { + let mut pos = TextPosition::new("ab\r\nde"); + + assert_eq!(pos.next(), Some('a')); + assert_eq!(pos.next(), Some('b')); + assert_eq!(pos.line_number(), 1); + assert_eq!(pos.char_column_number(), 2); + + assert_eq!(pos.next(), Some('\n')); + assert_eq!(pos.line_number(), 2); + assert_eq!(pos.char_column_number(), 0); + + assert_eq!(pos.next(), Some('d')); + assert_eq!(pos.next(), Some('e')); + assert_eq!(pos.next(), None); + assert_eq!(pos.line_number(), 2); + assert_eq!(pos.char_column_number(), 2); + + assert_eq!(pos.byte_idx(), 6); + } +} diff --git a/native/libcst/src/tokenizer/whitespace_parser.rs b/native/libcst/src/tokenizer/whitespace_parser.rs new file mode 100644 index 00000000..3f96bab2 --- /dev/null +++ b/native/libcst/src/tokenizer/whitespace_parser.rs @@ -0,0 +1,527 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use crate::nodes::{ + Comment, EmptyLine, Fakeness, Newline, ParenthesizableWhitespace, ParenthesizedWhitespace, + SimpleWhitespace, TrailingWhitespace, +}; +use memchr::{memchr2, memchr2_iter}; +use thiserror::Error; + +use crate::Token; + +use super::TokType; + +#[allow(clippy::upper_case_acronyms, clippy::enum_variant_names)] +#[derive(Error, Debug, PartialEq, Eq)] +pub enum WhitespaceError { + #[error("WTF")] + WTF, + #[error("Internal error while parsing whitespace: {0}")] + InternalError(String), + #[error("Failed to parse mandatory trailing whitespace")] + TrailingWhitespaceError, +} + +type Result = std::result::Result; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct State<'a> { + pub line: usize, // one-indexed (to match parso's behavior) + pub column: usize, // zero-indexed (to match parso's behavior) + pub column_byte: usize, + pub absolute_indent: &'a str, + pub is_parenthesized: bool, + pub byte_offset: usize, +} + +impl<'a> Default for State<'a> { + fn default() -> Self { + Self { + line: 1, + column: 0, + column_byte: 0, + absolute_indent: "", + is_parenthesized: false, + byte_offset: 0, + } + } +} + +// TODO +pub struct Config<'a> { + pub input: &'a str, + pub lines: Vec<&'a str>, + pub default_newline: &'a str, + pub default_indent: &'a str, +} + +impl<'a> Config<'a> { + pub fn new(input: &'a str, tokens: &[Token<'a>]) -> Self { + let mut default_indent = " "; + for tok in tokens { + if tok.r#type == TokType::Indent { + default_indent = tok.relative_indent.unwrap(); + break; + } + } + + let mut lines = Vec::new(); + let mut start = 0; + let mut newline_positions = memchr2_iter(b'\n', b'\r', input.as_bytes()); + + while let Some(newline_position) = newline_positions.next() { + let newline_character = input.as_bytes()[newline_position] as char; + + let len = if newline_character == '\r' + && 
input.as_bytes().get(newline_position + 1) == Some(&b'\n') + { + // Skip the next '\n' + newline_positions.next(); + 2 + } else { + 1 + }; + + let end = newline_position + len; + lines.push(&input[start..end]); + start = end; + } + + // Push the last line if it isn't terminated by a newline character + if start < input.len() { + lines.push(&input[start..]); + } + + let default_newline = match lines.first().map(|line| line.as_bytes()).unwrap_or(&[]) { + [.., b'\r', b'\n'] => "\r\n", + [.., b'\n'] => "\n", + [.., b'\r'] => "\r", + _ => "\n", + }; + + Self { + input, + lines, + default_newline, + default_indent, + } + } + + pub fn has_trailing_newline(&self) -> bool { + self.input.ends_with('\n') + && !self.input.ends_with("\\\n") + && !self.input.ends_with("\\\r\n") + } + + fn get_line(&self, line_number: usize) -> Result<&'a str> { + let err_fn = || { + WhitespaceError::InternalError(format!( + "tried to get line {} which is out of range", + line_number + )) + }; + self.lines + .get(line_number.checked_sub(1).ok_or_else(err_fn)?) + .map(|l| &l[..]) + .ok_or_else(err_fn) + } + + fn get_line_after_column(&self, line_number: usize, column_index: usize) -> Result<&'a str> { + self.get_line(line_number)? + .get(column_index..) + .ok_or_else(|| { + WhitespaceError::InternalError(format!( + "Column index {} out of range for line {}", + column_index, line_number + )) + }) + } +} + +#[derive(Debug)] +enum ParsedEmptyLine<'a> { + NoIndent, + Line(EmptyLine<'a>), +} + +fn parse_empty_line<'a>( + config: &Config<'a>, + state: &mut State, + override_absolute_indent: Option<&'a str>, +) -> Result> { + let mut speculative_state = state.clone(); + if let Ok(indent) = parse_indent(config, &mut speculative_state, override_absolute_indent) { + let whitespace = parse_simple_whitespace(config, &mut speculative_state)?; + let comment = parse_comment(config, &mut speculative_state)?; + if let Some(newline) = parse_newline(config, &mut speculative_state)? 
{ + *state = speculative_state; + return Ok(ParsedEmptyLine::Line(EmptyLine { + indent, + whitespace, + comment, + newline, + })); + } + } + Ok(ParsedEmptyLine::NoIndent) +} + +fn _parse_empty_lines<'a>( + config: &Config<'a>, + state: &mut State<'a>, + override_absolute_indent: Option<&'a str>, +) -> Result, EmptyLine<'a>)>> { + let mut lines = vec![]; + loop { + let last_state = state.clone(); + let parsed_line = parse_empty_line(config, state, override_absolute_indent)?; + if *state == last_state { + break; + } + match parsed_line { + ParsedEmptyLine::NoIndent => break, + ParsedEmptyLine::Line(l) => lines.push((state.clone(), l)), + } + } + Ok(lines) +} + +pub fn parse_empty_lines<'a>( + config: &Config<'a>, + state: &mut State<'a>, + override_absolute_indent: Option<&'a str>, +) -> Result>> { + // If override_absolute_indent is Some, then we need to parse all lines up to and including the + // last line that is indented at our level. These all belong to the footer and not to the next + // line's leading_lines. + // + // We don't know what the last line with indent=True is, and there could be indent=False lines + // interspersed with indent=True lines, so we need to speculatively parse all possible empty + // lines, and then unwind to find the last empty line with indent=True. + let mut speculative_state = state.clone(); + let mut lines = _parse_empty_lines(config, &mut speculative_state, override_absolute_indent)?; + + if override_absolute_indent.is_some() { + // Remove elements from the end until we find an indented line. 
+ while let Some((_, empty_line)) = lines.last() { + if empty_line.indent { + break; + } + lines.pop(); + } + } + + if let Some((final_state, _)) = lines.last() { + // update the state to match the last line that we captured + *state = final_state.clone(); + } + + Ok(lines.into_iter().map(|(_, e)| e).collect()) +} + +pub fn parse_comment<'a>(config: &Config<'a>, state: &mut State) -> Result>> { + let newline_after = config.get_line_after_column(state.line, state.column_byte)?; + if newline_after.as_bytes().first() != Some(&b'#') { + return Ok(None); + } + let comment_str = if let Some(idx) = memchr2(b'\n', b'\r', newline_after.as_bytes()) { + &newline_after[..idx] + } else { + newline_after + }; + advance_this_line( + config, + state, + comment_str.chars().count(), + comment_str.len(), + )?; + Ok(Some(Comment(comment_str))) +} + +pub fn parse_newline<'a>(config: &Config<'a>, state: &mut State) -> Result>> { + let newline_after = config.get_line_after_column(state.line, state.column_byte)?; + let len = match newline_after.as_bytes() { + [b'\n', ..] => 1, + [b'\r', b'\n', ..] => 2, + [b'\r', ..] => 1, + _ => 0, + }; + if len > 0 { + let newline_str = &newline_after[..len]; + advance_this_line(config, state, len, len)?; + if state.column_byte != config.get_line(state.line)?.len() { + return Err(WhitespaceError::InternalError(format!( + "Found newline at ({}, {}) but it's not EOL", + state.line, state.column + ))); + } + if state.line < config.lines.len() { + advance_to_next_line(config, state)?; + } + return Ok(Some(Newline( + if newline_str == config.default_newline { + None + } else { + Some(newline_str) + }, + Fakeness::Real, + ))); + } + + // If we're at the end of the file but not on BOL, that means this is the fake + // newline inserted by the tokenizer. 
+ if state.byte_offset == config.input.len() && state.column_byte != 0 { + return Ok(Some(Newline(None, Fakeness::Fake))); + } + Ok(None) +} + +pub fn parse_optional_trailing_whitespace<'a>( + config: &Config<'a>, + state: &mut State, +) -> Result>> { + let mut speculative_state = state.clone(); + let whitespace = parse_simple_whitespace(config, &mut speculative_state)?; + let comment = parse_comment(config, &mut speculative_state)?; + if let Some(newline) = parse_newline(config, &mut speculative_state)? { + *state = speculative_state; + Ok(Some(TrailingWhitespace { + whitespace, + comment, + newline, + })) + } else { + Ok(None) + } +} + +pub fn parse_trailing_whitespace<'a>( + config: &Config<'a>, + state: &mut State, +) -> Result> { + match parse_optional_trailing_whitespace(config, state)? { + Some(ws) => Ok(ws), + _ => Err(WhitespaceError::TrailingWhitespaceError), + } +} + +fn parse_indent<'a>( + config: &Config<'a>, + state: &mut State, + override_absolute_indent: Option<&'a str>, +) -> Result { + let absolute_indent = override_absolute_indent.unwrap_or(state.absolute_indent); + if state.column_byte != 0 { + if state.column_byte == config.get_line(state.line)?.len() + && state.line == config.lines.len() + { + Ok(false) + } else { + Err(WhitespaceError::InternalError( + "Column should not be 0 when parsing an index".to_string(), + )) + } + } else { + Ok( + if config + .get_line_after_column(state.line, state.column_byte)? 
+ .starts_with(absolute_indent) + { + state.column_byte += absolute_indent.len(); + state.column += absolute_indent.chars().count(); + state.byte_offset += absolute_indent.len(); + true + } else { + false + }, + ) + } +} + +fn advance_to_next_line<'a>(config: &Config<'a>, state: &mut State) -> Result<()> { + let cur_line = config.get_line(state.line)?; + state.byte_offset += cur_line.len() - state.column_byte; + state.column = 0; + state.column_byte = 0; + state.line += 1; + Ok(()) +} + +fn advance_this_line<'a>( + config: &Config<'a>, + state: &mut State, + char_count: usize, + offset: usize, +) -> Result<()> { + let cur_line = config.get_line(state.line)?; + if cur_line.len() < state.column_byte + offset { + return Err(WhitespaceError::InternalError(format!( + "Tried to advance past line {}'s end", + state.line + ))); + } + state.column += char_count; + state.column_byte += offset; + state.byte_offset += offset; + Ok(()) +} + +pub fn parse_simple_whitespace<'a>( + config: &Config<'a>, + state: &mut State, +) -> Result> { + let capture_ws = |line, col| -> Result<&'a str> { + let line = config.get_line_after_column(line, col)?; + let bytes = line.as_bytes(); + let mut idx = 0; + while idx < bytes.len() { + match bytes[idx..] { + [b' ' | b'\t' | b'\x0c', ..] => idx += 1, + [b'\\', b'\r', b'\n', ..] => idx += 3, + [b'\\', b'\r' | b'\n', ..] 
=> idx += 2, + _ => break, + } + } + Ok(&line[..idx]) + }; + let start_offset = state.byte_offset; + let mut prev_line: &str; + loop { + prev_line = capture_ws(state.line, state.column_byte)?; + if !prev_line.contains('\\') { + break; + } + advance_to_next_line(config, state)?; + } + advance_this_line(config, state, prev_line.chars().count(), prev_line.len())?; + + Ok(SimpleWhitespace( + &config.input[start_offset..state.byte_offset], + )) +} + +pub fn parse_parenthesizable_whitespace<'a>( + config: &Config<'a>, + state: &mut State<'a>, +) -> Result> { + if state.is_parenthesized { + if let Some(ws) = parse_parenthesized_whitespace(config, state)? { + return Ok(ParenthesizableWhitespace::ParenthesizedWhitespace(ws)); + } + } + parse_simple_whitespace(config, state).map(ParenthesizableWhitespace::SimpleWhitespace) +} + +pub fn parse_parenthesized_whitespace<'a>( + config: &Config<'a>, + state: &mut State<'a>, +) -> Result>> { + if let Some(first_line) = parse_optional_trailing_whitespace(config, state)? { + let empty_lines = _parse_empty_lines(config, state, None)? 
+ .into_iter() + .map(|(_, line)| line) + .collect(); + let indent = parse_indent(config, state, None)?; + let last_line = parse_simple_whitespace(config, state)?; + Ok(Some(ParenthesizedWhitespace { + first_line, + empty_lines, + indent, + last_line, + })) + } else { + Ok(None) + } +} + +#[cfg(test)] +mod tests { + use crate::{tokenize, Comment, Config, Result, SimpleWhitespace}; + + use super::{parse_comment, parse_simple_whitespace}; + + #[test] + fn config_mixed_newlines() -> Result<'static, ()> { + let source = "'' % {\n'test1': '',\r 'test2': '',\r\n}"; + let tokens = tokenize(source)?; + + let config = Config::new(source, &tokens); + + assert_eq!( + &config.lines, + &["'' % {\n", "'test1': '',\r", " 'test2': '',\r\n", "}"] + ); + + Ok(()) + } + + fn _parse_simple_whitespace(src: &str) -> Result { + let tokens = tokenize(src)?; + let config = Config::new(src, &tokens); + let mut state = Default::default(); + Ok(parse_simple_whitespace(&config, &mut state)?) + } + + #[test] + fn simple_whitespace_line_continuations() -> Result<'static, ()> { + assert_eq!( + _parse_simple_whitespace(" \\\n # foo")?, + SimpleWhitespace(" \\\n ") + ); + + assert_eq!( + _parse_simple_whitespace(" \\\r # foo")?, + SimpleWhitespace(" \\\r ") + ); + assert_eq!( + _parse_simple_whitespace(" \\\r\n # foo")?, + SimpleWhitespace(" \\\r\n ") + ); + + assert_eq!( + _parse_simple_whitespace(" \\\r\n\\\n # foo")?, + SimpleWhitespace(" \\\r\n\\\n ") + ); + + Ok(()) + } + + #[test] + fn simple_whitespace_mixed() -> Result<'static, ()> { + assert_eq!( + _parse_simple_whitespace(" \t\x0clol")?, + SimpleWhitespace(" \t\x0c"), + ); + + Ok(()) + } + + fn _parse_comment(src: &str) -> Result> { + let tokens = tokenize(src)?; + let config = Config::new(src, &tokens); + let mut state = Default::default(); + Ok(parse_comment(&config, &mut state)?) 
+ } + + #[test] + fn single_comment() -> Result<'static, ()> { + assert_eq!(_parse_comment("# foo\n# bar")?, Some(Comment("# foo"))); + Ok(()) + } + + #[test] + fn comment_until_eof() -> Result<'static, ()> { + assert_eq!(_parse_comment("#")?, Some(Comment("#"))); + Ok(()) + } + + #[test] + fn no_comment() -> Result<'static, ()> { + assert_eq!(_parse_comment("foo")?, None); + assert_eq!(_parse_comment("\n")?, None); + Ok(()) + } +} diff --git a/native/libcst/tests/.gitattributes b/native/libcst/tests/.gitattributes new file mode 100644 index 00000000..e50e9b7b --- /dev/null +++ b/native/libcst/tests/.gitattributes @@ -0,0 +1 @@ +fixtures/mixed_newlines.py autocrlf=false \ No newline at end of file diff --git a/native/libcst/tests/fixtures/big_binary_operator.py b/native/libcst/tests/fixtures/big_binary_operator.py new file mode 100644 index 00000000..2ab0d65c --- /dev/null +++ b/native/libcst/tests/fixtures/big_binary_operator.py @@ -0,0 +1,32 @@ +( # 350 binary operators lets go + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' + + 'X' + 'Y' + 'Z' + 'Q' + 'T' +) diff --git 
a/native/libcst/tests/fixtures/class_craziness.py b/native/libcst/tests/fixtures/class_craziness.py new file mode 100644 index 00000000..67afc649 --- /dev/null +++ b/native/libcst/tests/fixtures/class_craziness.py @@ -0,0 +1,28 @@ +class Foo: ... + +class Bar : + ... + +class Old ( ) : + gold : int + + +class OO ( Foo ) : ... + +class OOP ( Foo , Bar, ) : pass + +class OOPS ( + Foo , + +) : + pass + +class OOPSI ( Foo, * Bar , metaclass = + foo , +): pass + +class OOPSIE ( list , *args, kw = arg , ** kwargs ) : + what : does_this_even = mean + + def __init__(self) -> None: + self.foo: Bar = Bar() diff --git a/native/libcst/tests/fixtures/comments.py b/native/libcst/tests/fixtures/comments.py new file mode 100644 index 00000000..80830d21 --- /dev/null +++ b/native/libcst/tests/fixtures/comments.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# fmt: on +# Some license here. +# +# Has many lines. Many, many lines. +# Many, many, many lines. +"""Module docstring. + +Possibly also many, many lines. +""" + +import os.path +import sys + +import a +from b.c.d.e import X # some noqa comment + +try: + import fast +except ImportError: + import slow as fast + + +# Some comment before a function. +y = 1 +( + # some strings + y # type: ignore +) + + +def function(default=None): + """Docstring comes first. + + Possibly many lines. + """ + # FIXME: Some comment about why this function is crap but still in production. + import inner_imports + + if inner_imports.are_evil(): + # Explains why we have this if. + # In great detail indeed. + x = X() + return x.method1() # type: ignore + + + # This return is also commented for some reason. + return default + + +# Explains why we use global state. +GLOBAL_STATE = {"a": a(1), "b": a(2), "c": a(3)} + + +# Another comment! +# This time two lines. + + +class Foo: + """Docstring for class Foo. Example from Sphinx docs.""" + + #: Doc comment for class attribute Foo.bar. + #: It can have multiple lines. 
+ bar = 1 + + flox = 1.5 #: Doc comment for Foo.flox. One line only. + + baz = 2 + """Docstring for class attribute Foo.baz.""" + + def __init__(self): + #: Doc comment for instance attribute qux. + self.qux = 3 + + self.spam = 4 + """Docstring for instance attribute spam.""" + + +#'

This is pweave!

+ + +@fast(really=True) +async def wat(): + # This comment, for some reason \ + # contains a trailing backslash. + async with X.open_async() as x: # Some more comments + result = await x.method1() + # Comment after ending a block. + if result: + print("A OK", file=sys.stdout) + # Comment between things. + print() + + +if True: # Hanging comments + # because why not + pass + +# Some closing comments. +# Maybe Vim or Emacs directives for formatting. +# Who knows. diff --git a/native/libcst/tests/fixtures/comparisons.py b/native/libcst/tests/fixtures/comparisons.py new file mode 100644 index 00000000..126ea15e --- /dev/null +++ b/native/libcst/tests/fixtures/comparisons.py @@ -0,0 +1,21 @@ +if not 1: pass +if 1 and 1: pass +if 1 or 1: pass +if not not not 1: pass +if not 1 and 1 and 1: pass +if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass + +if 1: pass +#x = (1 == 1) +if 1 == 1: pass +if 1 != 1: pass +if 1 < 1: pass +if 1 > 1: pass +if 1 <= 1: pass +if 1 >= 1: pass +if x is x: pass +#if x is not x: pass +#if 1 in (): pass +#if 1 not in (): pass +if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 in x is x is x: pass +#if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in x is x is not x: pass diff --git a/native/libcst/tests/fixtures/dangling_indent.py b/native/libcst/tests/fixtures/dangling_indent.py new file mode 100644 index 00000000..32ac6230 --- /dev/null +++ b/native/libcst/tests/fixtures/dangling_indent.py @@ -0,0 +1,3 @@ +if 1: + pass + \ No newline at end of file diff --git a/native/libcst/tests/fixtures/decorated_function_without_body.py b/native/libcst/tests/fixtures/decorated_function_without_body.py new file mode 100644 index 00000000..d7c96e02 --- /dev/null +++ b/native/libcst/tests/fixtures/decorated_function_without_body.py @@ -0,0 +1,3 @@ +@hello +@bello +def f () : ... 
\ No newline at end of file diff --git a/native/libcst/tests/fixtures/dysfunctional_del.py b/native/libcst/tests/fixtures/dysfunctional_del.py new file mode 100644 index 00000000..a3fa4575 --- /dev/null +++ b/native/libcst/tests/fixtures/dysfunctional_del.py @@ -0,0 +1,14 @@ +# dysfunctional_del.py + +del a + +del a[1] + +del a.b.c +del ( a, b , c ) +del [ a, b , c ] + +del a , b, c + + +del a[1] , b [ 2] \ No newline at end of file diff --git a/native/libcst/tests/fixtures/expr.py b/native/libcst/tests/fixtures/expr.py new file mode 100644 index 00000000..abb78ab9 --- /dev/null +++ b/native/libcst/tests/fixtures/expr.py @@ -0,0 +1,376 @@ +... +"some_string" +b"\\xa3" +Name +None +True +False +1 +1.0 +1j +True or False +True or False or None +True and False +True and False and None +(Name1 and Name2) or Name3 +Name1 and Name2 or Name3 +Name1 or (Name2 and Name3) +Name1 or Name2 and Name3 +(Name1 and Name2) or (Name3 and Name4) +Name1 and Name2 or Name3 and Name4 +Name1 or (Name2 and Name3) or Name4 +Name1 or Name2 and Name3 or Name4 +v1 << 2 +1 >> v2 +1 % finished +1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 +((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) +not great +~great ++value +-1 +~int and not v1 ^ 123 + v2 | True +(~int) and (not ((v1 ^ (123 + v2)) | True)) ++(really ** -(confusing ** ~(operator ** -precedence))) +flags & ~ select.EPOLLIN and waiters.write_task is not None +lambda arg: None +lambda arg : None +lambda a=True: a +lambda a=True : a +lambda a, b, c=True: a +lambda a, b, c=True, *, d=(1 << v2), e='str': a +lambda a, b, c=True, *vararg, d=(v1 << 2), e='str', **kwargs: a + b +lambda a, b, c=True, *vararg, d=(v1 << 2), e='str', **kwargs : a + b +manylambdas = lambda x=lambda y=lambda z=1: z: y(): x() +foo = (lambda port_id, ignore_missing: {"port1": port1_resource, "port2": port2_resource}[port_id]) +1 if True else 2 +_ if 0else _ +str or None if True else str or bytes or None +(str or None) if True else (str or bytes or None) +str or None if (1 if True else 
2) else str or bytes or None +(str or None) if (1 if True else 2) else (str or bytes or None) +((super_long_variable_name or None) if (1 if super_long_test_name else 2) else (str or bytes or None)) +{'2.7': dead, '3.7': (long_live or die_hard)} +{'2.7': dead, '3.7': (long_live or die_hard), **{'3.6': verygood}} +{**a, **b, **c} +{"2.7", "3.6", "3.7", "3.8", "3.9"} +{"2.7", "3.6", "3.7", "3.8", "3.9",} +{"2.7", "3.6", "3.7", "3.8", "3.9", ("4.0" if gilectomy else "3.10")} +({"a": "b"}, (True or False), (+value), "string", b"bytes") or None +() +(1,) +(1, 2) +(1, 2, 3) +[] +[ ] +[ 1 , ] +[1, 2, 3, 4, 5, 6, 7, 8, 9, (10 or A), (11 or B), (12 or C)] +[ + 1, + 2, + 3, +] +[*a] +[*range(10)] +[ + *a, + 4, + 5, +] +[ + 4, + *a, + 5, +] +[ + this_is_a_very_long_variable_which_will_force_a_delimiter_split, + element, + another, + *more, +] +{ } +{ 1 , } +{ 1 : 2 , } +{i for i in (1, 2, 3)} +{(i ** 2) for i in (1, 2, 3)} +{(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} +{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} +[i for i in (1, 2, 3)] +[(i ** 2) for i in (1, 2, 3)] +[(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] +[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] +{i: 0 for i in (1, 2, 3)} +{i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} +{a: b * 2 for a, b in dictionary.items()} +{a: b * -2 for a, b in dictionary.items()} +{ + k: v + for k, v in this_is_a_very_long_variable_which_will_cause_a_trailing_comma_which_breaks_the_comprehension +} +Python3 > Python2 > COBOL +Life is Life +call() +call(arg) +call(kwarg="hey") +call(arg, kwarg="hey") +call(arg, another, kwarg="hey", **kwargs) +call( + this_is_a_very_long_variable_which_will_force_a_delimiter_split, + arg, + another, + kwarg="hey", + **kwargs, +) # note: no trailing comma pre-3.6 +call(*gidgets[:2]) +call(a, *gidgets[:2]) +call(**screen_kwargs) +call(b, **screen_kwargs) +call()()()()()() +call(**self.screen_kwargs) +call(b, **self.screen_kwargs) +call(a=a, *args) +call(a=a, *args,) 
+call(a=a, **kwargs) +call(a=a, **kwargs,) +lukasz.langa.pl +call.me(maybe) +1 .real +1.0 .real +....__class__ +list[str] +dict[str, int] +tuple[str, ...] +tuple[str, int, float, dict[str, int]] +tuple[ + str, + int, + float, + dict[str, int], +] +very_long_variable_name_filters: t.List[ + t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]], +] +xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) +) +xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) +) +xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( + sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__) +) # type: ignore +(str or None) if (sys.version_info[0] > (3,)) else (str or bytes or None) +{"2.7": dead, "3.7": long_live or die_hard} +{"2.7", "3.6", "3.7", "3.8", "3.9", "4.0" if gilectomy else "3.10"} +[1, 2, 3, 4, 5, 6, 7, 8, 9, 10 or A, 11 or B, 12 or C] +(SomeName) +SomeName +(Good, Bad, Ugly) +(i for i in (1, 2, 3)) +((i ** 2) for i in (1, 2, 3)) +((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) +(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) +(*starred,) +{ + "id": "1", + "type": "type", + "started_at": now(), + "ended_at": now() + timedelta(days=10), + "priority": 1, + "import_session_id": 1, + **kwargs, +} +a = (1,) +b = (1,) +c = 1 +d = (1,) + a + (2,) +e = (1,).count(1) +f = 1, *range(10) +g = 1, *"ten" +what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set( + vars_to_remove +) +what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set( + vars_to_remove +) +result = ( + session.query(models.Customer.id) + .filter( + models.Customer.account_id == account_id, models.Customer.email == email_address + ) + .order_by(models.Customer.id.asc()) + .all() +) +result = ( + session.query(models.Customer.id) + .filter( + models.Customer.account_id == 
account_id, models.Customer.email == email_address + ) + .order_by( + models.Customer.id.asc(), + ) + .all() +) +Ø = set() +authors.łukasz.say_thanks() +authors.lukasz.say_thanks() +mapping = { + A: 0.25 * (10.0 / 12), + B: 0.1 * (10.0 / 12), + C: 0.1 * (10.0 / 12), + D: 0.1 * (10.0 / 12), +} +[ + a + for + [ + a , + ] + in + [ + [ 1 ] + ] +] + +def gen(): + if 1: + if 2: + if 3: + if not is_value_of_type( + subkey, + type_args[0], + # key type is always invariant + invariant_check=True, + ): + return False + yield from outside_of_generator + a = yield + b = yield + c = yield + + +async def f(): + await some.complicated[0].call(with_args=(True or (1 is not 1))) + +lambda : None + +print(*[] or [1]) +print(**{1: 3} if False else {x: x for x in range(3)}) +print(*lambda x: x) +assert not Test, "Short message" +assert this is ComplexTest and not requirements.fit_in_a_single_line( + force=False +), "Short message" +assert parens is TooMany +for (x,) in (1,), (2,), (3,): + ... +for y in (): + ... +for z in (i for i in (1, 2, 3)): + ... +for i in call(): + ... +for j in 1 + (2 + 3): + ... +else: + ... +while this and that: + ... +while this and that: + ... +else: + ... 
+for ( + addr_family, + addr_type, + addr_proto, + addr_canonname, + addr_sockaddr, +) in socket.getaddrinfo("google.com", "http"): + pass +a = ( + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp + in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +) +a = ( + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp + not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +) +a = ( + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp + is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +) +a = ( + aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp + is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz +) +if ( + threading.current_thread() != threading.main_thread() + and threading.current_thread() != threading.main_thread() + or signal.getsignal(signal.SIGINT) != signal.default_int_handler +): + return True +if ( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +): + return True +if ( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +): + return True +if ( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +): + return True +if ( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +): + return True +if ( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + * aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +): + return True +if ( + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + / aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +): + return True +if ( + ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e + | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n 
+): + return True +if ( + ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e + | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h + ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n +): + return True +if ( + ~aaaaaaaaaaaaaaaa.a + + aaaaaaaaaaaaaaaa.b + - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e + | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h + ^ aaaaaaaaaaaaaaaa.i + << aaaaaaaaaaaaaaaa.k + >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n +): + return True +aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaa * ( + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa +) / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) +aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +bbbb >> bbbb * bbbb +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^ bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + +a += B +a[x] @= foo().bar +this.is_not >>= a.monad +last_call() +# standalone comment at ENDMARKER diff --git a/native/libcst/tests/fixtures/expr_statement.py b/native/libcst/tests/fixtures/expr_statement.py new file mode 100644 index 00000000..4ef73f08 --- /dev/null +++ b/native/libcst/tests/fixtures/expr_statement.py @@ -0,0 +1,11 @@ +1 +1, 2, 3 +x = 1 +x = 1, 2, 3 +x = y = z = 1, 2, 3 +x, y, z = 1, 2, 3 +abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4) + +( ( ( ... 
) ) ) + +a , = b \ No newline at end of file diff --git a/native/libcst/tests/fixtures/fun_with_func_defs.py b/native/libcst/tests/fixtures/fun_with_func_defs.py new file mode 100644 index 00000000..9e048a75 --- /dev/null +++ b/native/libcst/tests/fixtures/fun_with_func_defs.py @@ -0,0 +1,157 @@ +def f(a, /,): pass +def f(a, / ,): pass +def f(a, / ): pass +def f(a, /, c, d, e): pass +def f(a, /, c, *, d, e): pass +def f(a, /, c, *, d, e, **kwargs): pass +def f(a=1, /,): pass +def f(a=1, /, b=2, c=4): pass +def f(a=1, /, b=2, *, c=4): pass +def f(a=1, /, b=2, *, c): pass +def f(a=1, /, b=2, *, c=4, **kwargs): pass +def f(a=1, /, b=2, *, c, **kwargs,): pass + + +def g( + a, + /, +): + pass + + +def f(a, /, c, d, e): + pass + + +def f(a, /, c, *, d, e): + pass + +def foo(a, * + , bar): + pass + + +def f( + a, + /, + c, + *, + d, + e, + **kwargs, +): + pass + + +def f( + a=1, + /, +): + pass + +def say_hello( + self, user: str, / +): + print('Hello ' + user) + + +def f(a=1, /, b=2, c=4): + pass + + +def f(a=1, /, b=2, *, c=4): + pass + + +def f(a=1, /, b=2, *, c): + pass + + +def f( + a=1, + /, + b=2, + *, + c=4, + **kwargs, +): + pass + + +def f( + a=1, + /, + b=2, + *, + c, + **kwargs, +): + pass + + +async def foo ( + bar : Baz , +) -> zooooooooom : ... + + +async def foo(bar : Baz = 0 ) : ... + +async def foo() -> Bar: ... 
+ +async def outer( + foo +) -> Bar : + def inner(lol: Lol) -> None: + async def core (): + await lol + def second(inner): + pass + +def stars ( + yes : bool = True , + / , + noes : List[bool] = [ * falses ], + * all : The[Rest], + but : Wait[Theres[More]] , + ** it : ends[now] , + +) -> ret: + pass + +def stars ( + yes : bool = True , + / , + noes : List[bool] = [ * falses ], + * all : The[Rest], + but : Wait[Theres[More]] , + ** it : ends[now[without_a_comma]] + +) -> ret : + pass + + +def foo(bar: (yield)) -> (yield): something: (yield another) + +def foo( bar: (yield)) -> (yield) : + something: (yield another) + return 3 # no + return # yes + + +def f(): + for (yield 1)[1] in [1]: + pass + + +@decorators +# foo +@woohoo +def f(): + pass + +@getattr(None, '', lambda a: lambda b: a(b+1)) +def f(): ... + + +@a(now_this = lol) +def f(): ... diff --git a/native/libcst/tests/fixtures/global_nonlocal.py b/native/libcst/tests/fixtures/global_nonlocal.py new file mode 100644 index 00000000..a9839aef --- /dev/null +++ b/native/libcst/tests/fixtures/global_nonlocal.py @@ -0,0 +1,4 @@ +global a +global b , c, d +nonlocal a +nonlocal a , b \ No newline at end of file diff --git a/native/libcst/tests/fixtures/import.py b/native/libcst/tests/fixtures/import.py new file mode 100644 index 00000000..571e3640 --- /dev/null +++ b/native/libcst/tests/fixtures/import.py @@ -0,0 +1,19 @@ +# 'import' dotted_as_names +import sys +import time, sys +# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names) +from time import time +from time import (time) +from sys import path, argv +from sys import (path, argv) +from sys import (path, argv,) +from sys import * + + +from a import (b, ) +from . import a +from .a import b +from ... import a +from ...a import b +from .... import a +from ...... 
import a \ No newline at end of file diff --git a/native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py b/native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py new file mode 100644 index 00000000..22fd1309 --- /dev/null +++ b/native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py @@ -0,0 +1,4 @@ +if 1: + if 2: + if 3: + pass \ No newline at end of file diff --git a/native/libcst/tests/fixtures/just_a_comment_without_nl.py b/native/libcst/tests/fixtures/just_a_comment_without_nl.py new file mode 100644 index 00000000..56e1a2f8 --- /dev/null +++ b/native/libcst/tests/fixtures/just_a_comment_without_nl.py @@ -0,0 +1 @@ +# just a comment without a newline \ No newline at end of file diff --git a/native/libcst/tests/fixtures/malicious_match.py b/native/libcst/tests/fixtures/malicious_match.py new file mode 100644 index 00000000..54840022 --- /dev/null +++ b/native/libcst/tests/fixtures/malicious_match.py @@ -0,0 +1,42 @@ + +# foo + +match ( foo ) : #comment + +# more comments + case False : # comment + + ... + case ( True ) : ... + case _ : ... + case ( _ ) : ... 
# foo + +# bar + +match x: + case "StringMatchValue" : pass + case [1, 2] : pass + case [ 1 , * foo , * _ , ]: pass + case [ [ _, ] , *_ ]: pass + case {1: _, 2: _}: pass + case { "foo" : bar , ** rest } : pass + case { 1 : {**rest} , } : pass + case Point2D(): pass + case Cls ( 0 , ) : pass + case Cls ( x=0, y = 2) :pass + case Cls ( 0 , 1 , x = 0 , y = 2 ) : pass + case [x] as y: pass + case [x] as y : pass + case (True)as x:pass + case Foo:pass + case (Foo):pass + case ( Foo ) : pass + case [ ( Foo ) , ]: pass + case Foo|Bar|Baz : pass + case Foo | Bar | ( Baz): pass + case x,y , * more :pass + case y.z: pass + case 1, 2: pass + case ( Foo ( ) ) : pass + case (lol) if ( True , ) :pass + diff --git a/native/libcst/tests/fixtures/mixed_newlines.py b/native/libcst/tests/fixtures/mixed_newlines.py new file mode 100644 index 00000000..935a8b45 --- /dev/null +++ b/native/libcst/tests/fixtures/mixed_newlines.py @@ -0,0 +1,3 @@ +"" % { + 'test1': '', 'test2': '', +} diff --git a/native/libcst/tests/fixtures/pep646.py b/native/libcst/tests/fixtures/pep646.py new file mode 100644 index 00000000..6af0e6f1 --- /dev/null +++ b/native/libcst/tests/fixtures/pep646.py @@ -0,0 +1,37 @@ +# see https://github.com/python/cpython/pull/31018/files#diff-3f516b60719dd445d33225e4f316b36e85c9c51a843a0147349d11a005c55937 + +A[*b] +A[ * b ] +A[ * b , ] +A[*b] = 1 +del A[*b] + +A[* b , * b] +A[ b, *b] +A[* b, b] +A[ * b,b, b] +A[b, *b, b] + +A[*A[b, *b, b], b] +A[b, ...] 
+A[*A[b, ...]] + +A[ * ( 1,2,3)] +A[ * [ 1,2,3]] + +A[1:2, *t] +A[1:, *t, 1:2] +A[:, *t, :] +A[*t, :, *t] + +A[* returns_list()] +A[*returns_list(), * returns_list(), b] + +def f1(*args: *b): pass +def f2(*args: *b, arg1): pass +def f3(*args: *b, arg1: int): pass +def f4(*args: *b, arg1: int = 1): pass + +def f(*args: *tuple[int, ...]): pass +def f(*args: *tuple[int, *Ts]): pass +def f() -> tuple[int, *tuple[int, ...]]: pass \ No newline at end of file diff --git a/native/libcst/tests/fixtures/raise.py b/native/libcst/tests/fixtures/raise.py new file mode 100644 index 00000000..28613862 --- /dev/null +++ b/native/libcst/tests/fixtures/raise.py @@ -0,0 +1,4 @@ +raise +raise foo +raise foo from bar +raise lol() from f() + 1 \ No newline at end of file diff --git a/native/libcst/tests/fixtures/smol_statements.py b/native/libcst/tests/fixtures/smol_statements.py new file mode 100644 index 00000000..93687bce --- /dev/null +++ b/native/libcst/tests/fixtures/smol_statements.py @@ -0,0 +1,4 @@ +def f(): + pass ; break ; continue ; return ; return foo + + assert foo , bar ; a += 2 \ No newline at end of file diff --git a/native/libcst/tests/fixtures/spacious_spaces.py b/native/libcst/tests/fixtures/spacious_spaces.py new file mode 100644 index 00000000..5c979eee --- /dev/null +++ b/native/libcst/tests/fixtures/spacious_spaces.py @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/native/libcst/tests/fixtures/starry_tries.py b/native/libcst/tests/fixtures/starry_tries.py new file mode 100644 index 00000000..2749a031 --- /dev/null +++ b/native/libcst/tests/fixtures/starry_tries.py @@ -0,0 +1,64 @@ +#foo. 
+ +try : + pass + + # foo + +except * lol as LOL : + + pass + +except * f: + + # foo + + pass + +else : + + pass + +finally : + + foo + +try: + pass +except*f: + pass +finally: + pass + + +try: + + # 1 + + try: + + # 2 + + pass + + # 3 + + # 4 + + finally: + + # 5 + + pass + + # 6 + + # 7 + +except *foo: + + #8 + + pass + + #9 diff --git a/native/libcst/tests/fixtures/suicidal_slices.py b/native/libcst/tests/fixtures/suicidal_slices.py new file mode 100644 index 00000000..8d9566e8 --- /dev/null +++ b/native/libcst/tests/fixtures/suicidal_slices.py @@ -0,0 +1,28 @@ +slice[0] +slice[0:1] +slice[0:1:2] +slice[:] +slice[:-1] +slice[1:] +slice[::-1] +slice[d :: d + 1] +slice[:c, c - 1] +numpy[:, 0:1] +numpy[:, :-1] +numpy[0, :] +numpy[:, i] +numpy[0, :2] +numpy[:N, 0] +numpy[:2, :4] +numpy[2:4, 1:5] +numpy[4:, 2:] +numpy[:, (0, 1, 2, 5)] +numpy[0, [0]] +numpy[:, [i]] +numpy[1 : c + 1, c] +numpy[-(c + 1) :, d] +numpy[:, l[-2]] +numpy[:, ::-1] +numpy[np.newaxis, :] + +( spaces [:: , a : , a : a : a , ] ) \ No newline at end of file diff --git a/native/libcst/tests/fixtures/super_strings.py b/native/libcst/tests/fixtures/super_strings.py new file mode 100644 index 00000000..369270ab --- /dev/null +++ b/native/libcst/tests/fixtures/super_strings.py @@ -0,0 +1,58 @@ +_ = "" +_ = '' +_ = """""" +_ = '''''' + +_ = 'a' "string" 'that' r"is" 'concatenated ' + +b"string " +b"and non f" rb'string' + +( + "parenthesized" + "concatenated" + """triple + quoted + """ + +) + +_ = f"string" + +f"string" "bonanza" f'starts' r"""here""" + +_ = f"something {{**not** an expression}} {but(this._is)} {{and this isn't.}} end" + +_(f"ok { expr = !r: aosidjhoi } end") + +print(f"{self.ERASE_CURRENT_LINE}{self._human_seconds(elapsed_time)} {percent:.{self.pretty_precision}f}% complete, {self.estimate_completion(elapsed_time, finished, left)} estimated for {left} files to go...") + +f"{"\n".join()}" + +f"___{ + x +}___" + +f"___{( + x +)}___" + +f'\{{\}}' +f"regexp_like(path, '.*\{file_type}$')" 
+f"\lfoo" + +f"{_:{_:}{a}}" + +f"foo {f"bar {x}"} baz" +f'some words {a+b:.3f} more words {c+d=} final words' +f"{'':*^{1:{1}}}" +f"{'':*^{1:{1:{1}}}}" +f"{f"{f"{f"{f"{f"{1+1}"}"}"}"}"}" + + +t'Nothing to see here, move along' +t"User {action}: {amount:.2f} {item}" +t"

HTML is code {too}

" +t"value={value!r}" +t"This wrinkles my brain {value:.{precision}f}" +_ = t"everything" + t" is {tstrings}" diff --git a/native/libcst/tests/fixtures/terrible_tries.py b/native/libcst/tests/fixtures/terrible_tries.py new file mode 100644 index 00000000..eb5429cc --- /dev/null +++ b/native/libcst/tests/fixtures/terrible_tries.py @@ -0,0 +1,93 @@ +#foo. + +try : + bar() + +finally : + pass + + +try : + pass + + # foo + +except lol as LOL : + + pass + +except : + + # foo + + pass + +else : + + pass + +finally : + + foo + +try: + pass +except: + pass +finally: + pass + + +try: + + # 1 + + try: + + # 2 + + pass + + # 3 + + # 4 + + finally: + + # 5 + + pass + + # 6 + + # 7 + +except foo: + + #8 + + pass + + #9 + +try: + pass +except (foo, bar): + pass + +try: + pass +except foo, bar: + pass + +try: + pass +except (foo, bar), baz: + pass +else: + pass + +try: + pass +except* something, somethingelse: + pass \ No newline at end of file diff --git a/native/libcst/tests/fixtures/trailing_comment_without_nl.py b/native/libcst/tests/fixtures/trailing_comment_without_nl.py new file mode 100644 index 00000000..0c58f18d --- /dev/null +++ b/native/libcst/tests/fixtures/trailing_comment_without_nl.py @@ -0,0 +1,4 @@ + + + +# hehehe >:) \ No newline at end of file diff --git a/native/libcst/tests/fixtures/trailing_whitespace.py b/native/libcst/tests/fixtures/trailing_whitespace.py new file mode 100644 index 00000000..5a01c197 --- /dev/null +++ b/native/libcst/tests/fixtures/trailing_whitespace.py @@ -0,0 +1,5 @@ + + +x = 42 +print(x) + \ No newline at end of file diff --git a/native/libcst/tests/fixtures/tuple_shenanigans.py b/native/libcst/tests/fixtures/tuple_shenanigans.py new file mode 100644 index 00000000..136d79d2 --- /dev/null +++ b/native/libcst/tests/fixtures/tuple_shenanigans.py @@ -0,0 +1,30 @@ +(1, 2) +(1, 2, 3) + +# alright here we go. + +() +(()) +(((())), ()) +( # evil >:) + # evil >:( +) # ... 
+(1,) +( * 1 , * 2 ,) +*_ = (l,) +() = x +( ) = ( x, ) +(x) = (x) +( x , ) = x +( x , *y , * z , ) = l +( x , *y , * z , ) = ( x , *y , * z , ) = ( x , *y , * z , x ) +( + x , # :) + bar, * + baz + , +) =\ +( + (let, *s, ( ) ) , + nest , them , ( * t , * u , * p , l , * e , s , ) +) \ No newline at end of file diff --git a/native/libcst/tests/fixtures/type_parameters.py b/native/libcst/tests/fixtures/type_parameters.py new file mode 100644 index 00000000..ef6a39eb --- /dev/null +++ b/native/libcst/tests/fixtures/type_parameters.py @@ -0,0 +1,69 @@ +# fmt: off + +type TA = int + +type TA1[A] = lambda A: A + +class Outer[A]: + type TA1[A] = None + +type TA1[A, B] = dict[A, B] + +class Outer[A]: + def inner[B](self): + type TA1[C] = TA1[A, B] | int + return TA1 + +def more_generic[T, *Ts, **P](): + type TA[T2, *Ts2, **P2] = tuple[Callable[P, tuple[T, *Ts]], Callable[P2, tuple[T2, *Ts2]]] + return TA + +type Recursive = Recursive + +def func[A](A): return A + +class ClassA: + def func[__A](self, __A): return __A + +class ClassA[A, B](dict[A, B]): + ... + +class ClassA[A]: + def funcB[B](self): + class ClassC[C]: + def funcD[D](self): + return lambda: (A, B, C, D) + return ClassC + +class Child[T](Base[lambda: (int, outer_var, T)]): ... 
+ +type Alias[T: ([T for T in (T, [1])[1]], T)] = [T for T in T.__name__] +type Alias[T: [lambda: T for T in (T, [1])[1]]] = [lambda: T for T in T.__name__] + +class Foo[T: Foo, U: (Foo, Foo)]: + pass + +def func[T](a: T = "a", *, b: T = "b"): + return (a, b) + +def func1[A: str, B: str | int, C: (int, str)](): + return (A, B, C) + +type A [ T , * V ] =foo;type B=A + +def AAAAAAAAAAAAAAAAAA [ T : int ,*Ts , ** TT ] ():pass +class AAAAAAAAAAAAAAAAAA [ T : int ,*Ts , ** TT ] :pass + +def yikes[A:int,*B,**C](*d:*tuple[A,*B,...])->A:pass + +def func[T=int, **U=float, *V=None](): pass + +class C[T=int, **U=float, *V=None]: pass + +type Alias[T = int, **U = float, *V = None] = int + +default = tuple[int, str] +type Alias[*Ts = *default] = Ts +type Foo[ * T = * default ] = int +type Foo[*T=*default ]=int +type Foo [ * T = * default ] = int \ No newline at end of file diff --git a/native/libcst/tests/fixtures/vast_emptiness.py b/native/libcst/tests/fixtures/vast_emptiness.py new file mode 100644 index 00000000..e69de29b diff --git a/native/libcst/tests/fixtures/with_wickedness.py b/native/libcst/tests/fixtures/with_wickedness.py new file mode 100644 index 00000000..ee6ff7b6 --- /dev/null +++ b/native/libcst/tests/fixtures/with_wickedness.py @@ -0,0 +1,52 @@ +# with_wickedness + +with foo : + pass + +with foo, bar: + pass + +with (foo, bar): + pass + +with (foo, bar,): + pass + +with foo, bar as bar: + pass + +with (foo, bar as bar): + pass + +with (foo, bar as bar,): + pass + +async def f(): + async with foo: + + with bar: + pass + + async with foo : + pass + + async with foo, bar: + pass + + async with (foo, bar): + pass + + async with (foo, bar,): + pass + + async with foo, bar as bar: + pass + + async with (foo, bar as bar): + pass + + async with (foo, bar as bar,): + pass + + async with foo(1+1) as bar , 1 as (a, b, ) , 2 as [a, b] , 3 as a[b] : + pass diff --git a/native/libcst/tests/fixtures/wonky_walrus.py b/native/libcst/tests/fixtures/wonky_walrus.py new file mode 
100644 index 00000000..d506b169 --- /dev/null +++ b/native/libcst/tests/fixtures/wonky_walrus.py @@ -0,0 +1,15 @@ +( foo := 5 ) + +any((lastNum := num) == 1 for num in [1, 2, 3]) + +[(lastNum := num) == 1 for num in [1, 2, 3]] + +while f := x(): + pass + +if f := x(): pass + +f(y:=1) +f(x, y := 1 ) + +_[_:=10] \ No newline at end of file diff --git a/native/libcst/tests/parser_roundtrip.rs b/native/libcst/tests/parser_roundtrip.rs new file mode 100644 index 00000000..7618eaec --- /dev/null +++ b/native/libcst/tests/parser_roundtrip.rs @@ -0,0 +1,55 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use difference::assert_diff; +use itertools::Itertools; +use libcst_native::{parse_module, prettify_error, Codegen}; +use std::{ + iter::once, + path::{Component, PathBuf}, +}; + +fn all_fixtures() -> impl Iterator { + let mut path = PathBuf::from(file!()); + path.pop(); + path = path + .components() + .skip(1) + .chain(once(Component::Normal("fixtures".as_ref()))) + .collect(); + + path.read_dir().expect("read_dir").into_iter().map(|file| { + let path = file.unwrap().path(); + let contents = std::fs::read_to_string(&path).expect("reading file"); + (path, contents) + }) +} + +#[test] +fn roundtrip_fixtures() { + for (path, input) in all_fixtures() { + let input = if let Some(stripped) = input.strip_prefix('\u{feff}') { + stripped + } else { + &input + }; + let m = match parse_module(input, None) { + Ok(m) => m, + Err(e) => panic!("{}", prettify_error(e, format!("{:#?}", path).as_ref())), + }; + let mut state = Default::default(); + m.codegen(&mut state); + let generated = state.to_string(); + if generated != input { + let got = visualize(&generated); + let expected = visualize(input); + assert_diff!(expected.as_ref(), got.as_ref(), "", 0); + } + } +} + +fn visualize(s: &str) -> String { + s.replace(' ', "▩").lines().join("↩\n") +} 
diff --git a/native/libcst_derive/Cargo.toml b/native/libcst_derive/Cargo.toml new file mode 100644 index 00000000..bf9959ab --- /dev/null +++ b/native/libcst_derive/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "libcst_derive" +version = "1.8.6" +edition = "2018" +description = "Proc macro helpers for libcst." +license = "MIT" +repository = "https://github.com/Instagram/LibCST" +documentation = "https://libcst.rtfd.org" +keywords = ["macros", "python"] + +[lib] +proc-macro = true + +[dependencies] +syn = "2.0" +quote = "1.0" + +[dev-dependencies] +trybuild = "1.0" diff --git a/native/libcst_derive/LICENSE b/native/libcst_derive/LICENSE new file mode 100644 index 00000000..5594616f --- /dev/null +++ b/native/libcst_derive/LICENSE @@ -0,0 +1,102 @@ +All contributions towards LibCST are MIT licensed. + +Some Python files have been derived from the standard library and are therefore +PSF licensed. Modifications on these files are dual licensed (both MIT and +PSF). These files are: + +- libcst/_parser/base_parser.py +- libcst/_parser/parso/utils.py +- libcst/_parser/parso/pgen2/generator.py +- libcst/_parser/parso/pgen2/grammar_parser.py +- libcst/_parser/parso/python/py_token.py +- libcst/_parser/parso/python/tokenize.py +- libcst/_parser/parso/tests/test_fstring.py +- libcst/_parser/parso/tests/test_tokenize.py +- libcst/_parser/parso/tests/test_utils.py +- native/libcst/src/tokenizer/core/mod.rs +- native/libcst/src/tokenizer/core/string_types.rs + +Some Python files have been taken from dataclasses and are therefore Apache +licensed. Modifications on these files are licensed under Apache 2.0 license. +These files are: + +- libcst/_add_slots.py + +------------------------------------------------------------------------------- + +MIT License + +Copyright (c) Meta Platforms, Inc. and affiliates. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------------------------------------------------------------------- + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. 
This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + +------------------------------------------------------------------------------- + +APACHE LICENSE, VERSION 2.0 + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/native/libcst_derive/src/codegen.rs b/native/libcst_derive/src/codegen.rs new file mode 100644 index 00000000..7dee8adc --- /dev/null +++ b/native/libcst_derive/src/codegen.rs @@ -0,0 +1,68 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use proc_macro::TokenStream; +use quote::{quote, quote_spanned}; +use syn::{self, spanned::Spanned, Data, DataEnum, DeriveInput, Fields, FieldsUnnamed}; + +pub(crate) fn impl_codegen(ast: &DeriveInput) -> TokenStream { + match &ast.data { + Data::Enum(e) => impl_enum(ast, e), + Data::Struct(s) => quote_spanned! { + s.struct_token.span() => + compile_error!("Struct type is not supported") + } + .into(), + Data::Union(u) => quote_spanned! { + u.union_token.span() => + compile_error!("Union type is not supported") + } + .into(), + } +} + +fn impl_enum(ast: &DeriveInput, e: &DataEnum) -> TokenStream { + let mut varnames = vec![]; + for var in e.variants.iter() { + match &var.fields { + Fields::Named(n) => { + return quote_spanned! { + n.span() => + compile_error!("Named enum fields not supported") + } + .into() + } + f @ Fields::Unit => { + return quote_spanned! { + f.span() => + compile_error!("Empty enum variants not supported") + } + .into() + } + Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + if unnamed.len() > 1 { + return quote_spanned! 
{ + unnamed.span() => + compile_error!("Multiple unnamed fields not supported") + } + .into(); + } + varnames.push(&var.ident); + } + } + } + let ident = &ast.ident; + let generics = &ast.generics; + let gen = quote! { + impl<'a> Codegen<'a> for #ident #generics { + fn codegen(&self, state: &mut CodegenState<'a>) { + match self { + #(Self::#varnames(x) => x.codegen(state),)* + } + } + } + }; + gen.into() +} diff --git a/native/libcst_derive/src/cstnode.rs b/native/libcst_derive/src/cstnode.rs new file mode 100644 index 00000000..cdb6947e --- /dev/null +++ b/native/libcst_derive/src/cstnode.rs @@ -0,0 +1,447 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use proc_macro::TokenStream; +use quote::{format_ident, quote, quote_spanned, ToTokens}; +use syn::{ + self, + parse::{Parse, ParseStream}, + parse_quote, + punctuated::{Pair, Punctuated}, + spanned::Spanned, + token::Comma, + AngleBracketedGenericArguments, Attribute, Data, DataEnum, DataStruct, DeriveInput, Field, + Fields, FieldsNamed, FieldsUnnamed, GenericArgument, Generics, Ident, Meta, Path, + PathArguments, PathSegment, Token, Type, TypePath, Visibility, +}; + +pub(crate) struct CSTNodeParams { + traits: Punctuated, +} + +#[derive(PartialEq, Eq)] +enum SupportedTrait { + ParenthesizedNode, + Codegen, + Inflate, + NoIntoPy, + Default, +} + +pub(crate) fn impl_cst_node(ast: DeriveInput, args: CSTNodeParams) -> TokenStream { + match ast.data { + Data::Enum(e) => impl_enum(args, ast.attrs, ast.vis, ast.ident, ast.generics, e), + Data::Struct(s) => impl_struct(args, ast.attrs, ast.vis, ast.ident, ast.generics, s), + Data::Union(u) => quote_spanned! 
{ + u.union_token.span() => + compile_error!("Union type is not supported") + } + .into(), + } +} + +impl CSTNodeParams { + fn has_trait(&self, treyt: &SupportedTrait) -> bool { + self.traits.iter().any(|x| x == treyt) + } +} + +impl Parse for SupportedTrait { + fn parse(input: ParseStream) -> syn::Result { + if input.peek(Ident) { + let id: Ident = input.parse()?; + return match id.to_string().as_str() { + "ParenthesizedNode" => Ok(Self::ParenthesizedNode), + "Codegen" => Ok(Self::Codegen), + "Inflate" => Ok(Self::Inflate), + "NoIntoPy" => Ok(Self::NoIntoPy), + "Default" => Ok(Self::Default), + _ => Err(input.error("Not a supported trait to derive for cst_node")), + }; + } + Err(input.error("Pass in trait names to be derived")) + } +} + +impl Parse for CSTNodeParams { + fn parse(input: ParseStream) -> syn::Result { + Ok(Self { + traits: input.parse_terminated(SupportedTrait::parse, Token![,])?, + }) + } +} + +// enum Foo<'a> { +// Variant(Box>), +// } +// => +// enum Foo<'a> { +// Variant(Box>), +// } +// enum DeflatedFoo<'r, 'a> { +// Variant(Box>), +// } + +fn impl_enum( + args: CSTNodeParams, + mut attrs: Vec, + vis: Visibility, + ident: Ident, + generics: Generics, + mut e: DataEnum, +) -> TokenStream { + let deflated_vis = vis.clone(); + let deflated_ident = format_ident!("Deflated{}", &ident); + let deflated_generics: Generics = parse_quote!(<'r, 'a>); + let mut deflated_variant_tokens = vec![]; + + for var in e.variants.iter_mut() { + let (inflated_fields, deflated_fields) = impl_fields(var.fields.clone()); + var.fields = deflated_fields; + deflated_variant_tokens.push(var.to_token_stream()); + var.fields = inflated_fields; + } + add_inflated_attrs(&args, &mut attrs); + let inflated = DeriveInput { + attrs, + vis, + ident, + generics, + data: Data::Enum(e), + }; + + let deflated_attrs = get_deflated_attrs(&args); + + let gen = quote! 
{ + #[derive(Debug, PartialEq, Eq, Clone)] + #inflated + + #[derive(Debug, PartialEq, Eq, Clone)] + #(#deflated_attrs)* + #deflated_vis enum #deflated_ident#deflated_generics { + #(#deflated_variant_tokens,)* + } + }; + gen.into() +} + +fn get_deflated_attrs(args: &CSTNodeParams) -> Vec { + let mut deflated_attrs: Vec = vec![]; + if args.has_trait(&SupportedTrait::Inflate) { + deflated_attrs.push(parse_quote!(#[derive(Inflate)])); + } + if args.has_trait(&SupportedTrait::ParenthesizedNode) { + deflated_attrs.push(parse_quote!(#[derive(ParenthesizedDeflatedNode)])) + } + if args.has_trait(&SupportedTrait::Default) { + deflated_attrs.push(parse_quote!(#[derive(Default)])); + } + deflated_attrs +} + +fn add_inflated_attrs(args: &CSTNodeParams, attrs: &mut Vec) { + if args.has_trait(&SupportedTrait::Codegen) { + attrs.push(parse_quote!(#[derive(Codegen)])); + } + if args.has_trait(&SupportedTrait::ParenthesizedNode) { + attrs.push(parse_quote!(#[derive(ParenthesizedNode)])); + } + if args.has_trait(&SupportedTrait::Default) { + attrs.push(parse_quote!(#[derive(Default)])); + } + if !args.has_trait(&SupportedTrait::NoIntoPy) { + attrs.push(parse_quote!(#[cfg_attr(feature = "py", derive(TryIntoPy))])); + } +} + +// pub struct Foo<'a> { +// pub bar: Bar<'a>, +// pub value: &'a str, +// pub whitespace_after: SimpleWhitespace<'a>, +// pub(crate) tok: Option, +// } +// => +// pub struct Foo<'a> { +// pub bar: Bar<'a>, +// pub value: &'a str, +// pub whitespace_after: SimpleWhitespace<'a>, +// } +// struct DeflatedFoo<'r, 'a> { +// pub bar: DeflatedBar<'r, 'a>, +// pub value: &'a str, +// pub tok: Option> +// } + +fn impl_struct( + args: CSTNodeParams, + mut attrs: Vec, + vis: Visibility, + ident: Ident, + generics: Generics, + mut s: DataStruct, +) -> TokenStream { + let deflated_vis = vis.clone(); + let deflated_ident = format_ident!("Deflated{}", &ident); + let deflated_generics: Generics = parse_quote!(<'r, 'a>); + + let (inflated_fields, deflated_fields) = 
impl_fields(s.fields); + s.fields = inflated_fields; + + add_inflated_attrs(&args, &mut attrs); + + let inflated = DeriveInput { + attrs, + vis, + ident, + generics, + data: Data::Struct(s), + }; + + let deflated_attrs = get_deflated_attrs(&args); + + let gen = quote! { + #[derive(Debug, PartialEq, Eq, Clone)] + #inflated + + #[derive(Debug, PartialEq, Eq, Clone)] + #(#deflated_attrs)* + #deflated_vis struct #deflated_ident#deflated_generics + #deflated_fields + + }; + gen.into() +} + +fn impl_fields(fields: Fields) -> (Fields, Fields) { + match &fields { + Fields::Unnamed(fs) => { + let deflated_fields = impl_unnamed_fields(fs.clone()); + (fields, Fields::Unnamed(deflated_fields)) + } + Fields::Named(fs) => impl_named_fields(fs.clone()), + Fields::Unit => (Fields::Unit, Fields::Unit), + } +} + +fn impl_unnamed_fields(mut deflated_fields: FieldsUnnamed) -> FieldsUnnamed { + let mut added_lifetime = false; + deflated_fields.unnamed = deflated_fields + .unnamed + .into_pairs() + .map(|pair| { + let (deflated, lifetime) = make_into_deflated(pair); + added_lifetime |= lifetime; + deflated + }) + .collect(); + + // Make sure all Deflated* types have 'r 'a lifetime params + if !added_lifetime { + deflated_fields.unnamed.push(parse_quote! 
{ + std::marker::PhantomData<&'r &'a ()> + }); + } + deflated_fields +} + +fn impl_named_fields(mut fields: FieldsNamed) -> (Fields, Fields) { + let mut deflated_fields = fields.clone(); + let mut added_lifetime = false; + // Drop whitespace fields from deflated fields + // And add lifetimes to tokenref fields + deflated_fields.named = deflated_fields + .named + .into_pairs() + .filter(|pair| { + let id = pair.value().ident.as_ref().unwrap().to_string(); + !id.contains("whitespace") + && id != "footer" + && id != "header" + && id != "leading_lines" + && id != "lines_after_decorators" + }) + .map(|pair| { + if is_builtin(pair.value()) { + pair + } else { + let (deflated, lifetime) = make_into_deflated(pair); + added_lifetime |= lifetime; + deflated + } + }) + .map(|pair| { + let (mut val, punct) = pair.into_tuple(); + val.attrs = val.attrs.into_iter().filter(is_not_intopy_attr).collect(); + Pair::new(val, punct) + }) + .collect(); + + // Make sure all Deflated* types have 'r 'a lifetime params + if !added_lifetime { + deflated_fields.named.push(parse_quote! 
{ + _phantom: std::marker::PhantomData<&'r &'a ()> + }); + } + + // Drop tokenref fields from inflated fields + fields.named = fields + .named + .into_pairs() + .filter(|pair| !is_token_ref(pair.value())) + .collect(); + + (Fields::Named(fields), Fields::Named(deflated_fields)) +} + +fn is_builtin(field: &Field) -> bool { + get_pathseg(&field.ty) + .map(|seg| { + let segstr = seg.ident.to_string(); + segstr == "str" || segstr == "bool" || segstr == "String" + }) + .unwrap_or_default() +} + +fn is_token_ref(field: &Field) -> bool { + if let Some(seg) = rightmost_path_segment(&field.ty) { + return format!("{}", seg.ident) == "TokenRef"; + } + false +} + +// foo::bar -> foo::Deflatedbar<'r, 'a> +fn make_into_deflated(mut pair: Pair) -> (Pair, bool) { + let mut added_lifetime = true; + if let Some(seg) = rightmost_path_segment_mut(&mut pair.value_mut().ty) { + let seg_name = seg.ident.to_string(); + if seg_name != "TokenRef" { + seg.ident = format_ident!("Deflated{}", seg_name); + } + match seg.arguments { + PathArguments::None => { + seg.arguments = PathArguments::AngleBracketed(parse_quote!(<'r, 'a>)); + } + PathArguments::AngleBracketed(AngleBracketedGenericArguments { + ref mut args, .. + }) => { + args.insert(0, parse_quote!('r)); + } + _ => todo!(), + } + } else { + added_lifetime = false; + } + (pair, added_lifetime) +} + +// foo::bar::baz> -> baz> +fn get_pathseg(ty: &Type) -> Option<&PathSegment> { + match ty { + Type::Path(TypePath { path, .. }) => path.segments.last(), + _ => None, + } +} + +// foo::bar::baz> -> quux<'a> +fn rightmost_path_segment(ty: &Type) -> Option<&PathSegment> { + let mut candidate = get_pathseg(ty); + loop { + if let Some(pathseg) = candidate { + if let PathArguments::AngleBracketed(AngleBracketedGenericArguments { args, .. 
}) = + &pathseg.arguments + { + if let Some(GenericArgument::Type(t)) = args.last() { + candidate = get_pathseg(t); + continue; + } + } + } + break; + } + candidate +} + +fn get_pathseg_mut(ty: &mut Type) -> Option<&mut PathSegment> { + match ty { + Type::Path(TypePath { path, .. }) => path.segments.last_mut(), + _ => None, + } +} + +fn has_more_mut(candidate: &Option<&mut PathSegment>) -> bool { + if let Some(PathArguments::AngleBracketed(AngleBracketedGenericArguments { + ref args, .. + })) = candidate.as_ref().map(|c| &c.arguments) + { + matches!(args.last(), Some(GenericArgument::Type(_))) + } else { + false + } +} + +fn rightmost_path_segment_mut(ty: &mut Type) -> Option<&mut PathSegment> { + let mut candidate = get_pathseg_mut(ty); + + while has_more_mut(&candidate) { + candidate = match candidate.unwrap().arguments { + PathArguments::AngleBracketed(AngleBracketedGenericArguments { + ref mut args, .. + }) => { + if let Some(GenericArgument::Type(t)) = args.last_mut() { + get_pathseg_mut(t) + } else { + unreachable!(); + } + } + _ => unreachable!(), + }; + } + + candidate +} + +fn is_not_intopy_attr(attr: &Attribute) -> bool { + let path = attr.path(); + // support #[cfg_attr(feature = "py", skip_py)] + if path.is_ident("cfg_attr") { + return match attr.parse_args_with(|input: ParseStream| { + let _: Meta = input.parse()?; + let _: Token![,] = input.parse()?; + let nested_path: Path = input.parse()?; + let _: Option = input.parse()?; + Ok(nested_path) + }) { + Ok(nested_path) => !is_intopy_attr_path(&nested_path), + Err(_) => false, + }; + } + !is_intopy_attr_path(path) +} + +fn is_intopy_attr_path(path: &Path) -> bool { + path.is_ident("skip_py") || path.is_ident("no_py_default") +} + +#[test] +fn trybuild() { + let t = trybuild::TestCases::new(); + t.pass("tests/pass/*.rs"); +} + +#[test] +fn test_is_not_intopy_attr() { + assert!(!is_not_intopy_attr(&parse_quote!(#[skip_py]))); + assert!(!is_not_intopy_attr(&parse_quote!(#[no_py_default]))); + 
assert!(!is_not_intopy_attr( + &parse_quote!(#[cfg_attr(foo="bar",skip_py)]) + )); + assert!(!is_not_intopy_attr( + &parse_quote!(#[cfg_attr(foo="bar",no_py_default)]) + )); + assert!(is_not_intopy_attr(&parse_quote!(#[skippy]))); + assert!(is_not_intopy_attr( + &parse_quote!(#[cfg_attr(foo="bar",skippy)]) + )); +} diff --git a/native/libcst_derive/src/inflate.rs b/native/libcst_derive/src/inflate.rs new file mode 100644 index 00000000..9a166bdc --- /dev/null +++ b/native/libcst_derive/src/inflate.rs @@ -0,0 +1,76 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use proc_macro::TokenStream; +use quote::{format_ident, quote, quote_spanned}; +use syn::{self, spanned::Spanned, Data, DataEnum, DeriveInput, Fields, FieldsUnnamed}; + +pub(crate) fn impl_inflate(ast: &DeriveInput) -> TokenStream { + match &ast.data { + Data::Enum(e) => impl_inflate_enum(ast, e), + Data::Struct(s) => quote_spanned! { + s.struct_token.span() => + compile_error!("Struct type is not supported") + } + .into(), + Data::Union(u) => quote_spanned! { + u.union_token.span() => + compile_error!("Union type is not supported") + } + .into(), + } +} + +fn impl_inflate_enum(ast: &DeriveInput, e: &DataEnum) -> TokenStream { + let mut varnames = vec![]; + for var in e.variants.iter() { + match &var.fields { + Fields::Named(n) => { + return quote_spanned! { + n.span() => + compile_error!("Named enum fields not supported") + } + .into() + } + f @ Fields::Unit => { + return quote_spanned! { + f.span() => + compile_error!("Empty enum variants not supported") + } + .into() + } + Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + if unnamed.len() > 1 { + return quote_spanned! 
{ + unnamed.span() => + compile_error!("Multiple unnamed fields not supported") + } + .into(); + } + varnames.push(&var.ident); + } + } + } + let ident = &ast.ident; + let generics = &ast.generics; + let ident_str = ident.to_string(); + let inflated_ident = format_ident!( + "{}", + ident_str + .strip_prefix("Deflated") + .expect("Cannot implement Inflate on a non-Deflated item") + ); + let gen = quote! { + impl#generics Inflate<'a> for #ident #generics { + type Inflated = #inflated_ident <'a>; + fn inflate(mut self, config: & crate::tokenizer::whitespace_parser::Config<'a>) -> std::result::Result { + match self { + #(Self::#varnames(x) => Ok(Self::Inflated::#varnames(x.inflate(config)?)),)* + } + } + } + }; + gen.into() +} diff --git a/native/libcst_derive/src/into_py.rs b/native/libcst_derive/src/into_py.rs new file mode 100644 index 00000000..f5470aa1 --- /dev/null +++ b/native/libcst_derive/src/into_py.rs @@ -0,0 +1,183 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use proc_macro::TokenStream; +use quote::{format_ident, quote, quote_spanned, ToTokens}; +use syn::{ + spanned::Spanned, Attribute, Data, DataEnum, DataStruct, DeriveInput, Fields, FieldsNamed, + FieldsUnnamed, Type, TypePath, Visibility, +}; + +pub(crate) fn impl_into_py(ast: &DeriveInput) -> TokenStream { + match &ast.data { + Data::Enum(e) => impl_into_py_enum(ast, e), + Data::Struct(s) => impl_into_py_struct(ast, s), + Data::Union(u) => quote_spanned! 
{ + u.union_token.span() => + compile_error!("Union type is not supported") + } + .into(), + } +} + +fn impl_into_py_enum(ast: &DeriveInput, e: &DataEnum) -> TokenStream { + let mut toks = vec![]; + for var in e.variants.iter() { + let varname = &var.ident; + match &var.fields { + Fields::Named(n) => { + let mut fieldnames = vec![]; + for field in n.named.iter() { + if has_attr(&field.attrs, "skip_py") { + continue; + } + fieldnames.push(field.ident.as_ref().unwrap()); + } + let kwargs_toks = fields_to_kwargs(&var.fields, true); + toks.push(quote! { + Self::#varname { #(#fieldnames,)* .. } => { + use pyo3::types::PyAnyMethods; + + let libcst = pyo3::types::PyModule::import(py, "libcst")?; + let kwargs = #kwargs_toks ; + Ok(libcst + .getattr(stringify!(#varname)) + .expect(stringify!(no #varname found in libcst)) + .call((), Some(&kwargs))? + .into()) + } + }) + } + f @ Fields::Unit => { + return quote_spanned! { + f.span() => + compile_error!("Empty enum variants not supported") + } + .into() + } + Fields::Unnamed(_) => { + toks.push(quote! { + Self::#varname(x, ..) => x.try_into_py(py), + }); + } + } + } + let ident = &ast.ident; + let generics = &ast.generics; + let gen = quote! { + use pyo3::types::IntoPyDict as _; + #[automatically_derived] + impl#generics crate::nodes::traits::py::TryIntoPy for #ident #generics { + fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult { + match self { + #(#toks)* + } + } + } + }; + gen.into() +} + +fn impl_into_py_struct(ast: &DeriveInput, e: &DataStruct) -> TokenStream { + let kwargs_toks = fields_to_kwargs(&e.fields, false); + let ident = &ast.ident; + let generics = &ast.generics; + let gen = quote! 
{ + use pyo3::types::IntoPyDict as _; + #[automatically_derived] + impl#generics crate::nodes::traits::py::TryIntoPy for #ident #generics { + fn try_into_py(self, py: pyo3::Python) -> pyo3::PyResult { + use pyo3::types::PyAnyMethods; + let libcst = pyo3::types::PyModule::import(py, "libcst")?; + let kwargs = #kwargs_toks ; + Ok(libcst + .getattr(stringify!(#ident)) + .expect(stringify!(no #ident found in libcst)) + .call((), Some(&kwargs))? + .into()) + } + } + }; + gen.into() +} + +fn fields_to_kwargs(fields: &Fields, is_enum: bool) -> quote::__private::TokenStream { + let mut empty_kwargs = false; + let mut py_varnames = vec![]; + let mut rust_varnames = vec![]; + let mut optional_py_varnames = vec![]; + let mut optional_rust_varnames = vec![]; + match &fields { + Fields::Named(FieldsNamed { named, .. }) => { + for field in named.iter() { + if has_attr(&field.attrs, "skip_py") { + continue; + } + if let Some(ident) = field.ident.as_ref() { + let include = if let Visibility::Public(_) = field.vis { + true + } else { + is_enum + }; + if include { + let pyname = format_ident!("{}", ident); + let rustname = if is_enum { + ident.to_token_stream() + } else { + quote! { self.#ident } + }; + if !has_attr(&field.attrs, "no_py_default") { + if let Type::Path(TypePath { path, .. }) = &field.ty { + if let Some(first) = path.segments.first() { + if first.ident == "Option" { + optional_py_varnames.push(pyname); + optional_rust_varnames.push(rustname); + continue; + } + } + } + } + py_varnames.push(pyname); + rust_varnames.push(rustname); + } + } + } + empty_kwargs = py_varnames.is_empty() && optional_py_varnames.is_empty() + } + Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + if unnamed.first().is_some() { + py_varnames.push(format_ident!("value")); + rust_varnames.push(quote! { self.0 }); + } else { + empty_kwargs = true; + } + } + Fields::Unit => { + empty_kwargs = true; + } + }; + let kwargs_pairs = quote! 
{ + #(Some((stringify!(#py_varnames), #rust_varnames.try_into_py(py)?)),)* + }; + let optional_pairs = quote! { + #(#optional_rust_varnames.map(|x| x.try_into_py(py)).transpose()?.map(|x| (stringify!(#optional_py_varnames), x)),)* + }; + if empty_kwargs { + quote! { pyo3::types::PyDict::new(py) } + } else { + quote! { + [ #kwargs_pairs #optional_pairs ] + .iter() + .filter(|x| x.is_some()) + .map(|x| x.as_ref().unwrap()) + .collect::>() + .into_py_dict(py)? + } + } +} + +fn has_attr(attrs: &[Attribute], name: &'static str) -> bool { + attrs.iter().any(|attr| attr.path().is_ident(name)) +} diff --git a/native/libcst_derive/src/lib.rs b/native/libcst_derive/src/lib.rs new file mode 100644 index 00000000..b7aafeba --- /dev/null +++ b/native/libcst_derive/src/lib.rs @@ -0,0 +1,50 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +mod inflate; +use inflate::impl_inflate; +mod parenthesized_node; +use parenthesized_node::impl_parenthesized_node; +mod codegen; +use codegen::impl_codegen; +mod into_py; +use into_py::impl_into_py; +mod cstnode; +use cstnode::{impl_cst_node, CSTNodeParams}; + +use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(Inflate)] +pub fn inflate_derive(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + impl_inflate(&ast) +} + +#[proc_macro_derive(ParenthesizedNode)] +pub fn parenthesized_node_derive(input: TokenStream) -> TokenStream { + impl_parenthesized_node(&syn::parse(input).unwrap(), false) +} + +#[proc_macro_derive(ParenthesizedDeflatedNode)] +pub fn parenthesized_deflated_node_derive(input: TokenStream) -> TokenStream { + impl_parenthesized_node(&syn::parse(input).unwrap(), true) +} + +#[proc_macro_derive(Codegen)] +pub fn codegen_derive(input: TokenStream) -> TokenStream { + impl_codegen(&syn::parse(input).unwrap()) +} + 
+#[proc_macro_derive(TryIntoPy, attributes(skip_py, no_py_default))] +pub fn into_py(input: TokenStream) -> TokenStream { + impl_into_py(&syn::parse(input).unwrap()) +} + +#[proc_macro_attribute] +pub fn cst_node(args: TokenStream, input: TokenStream) -> TokenStream { + let args = parse_macro_input!(args as CSTNodeParams); + impl_cst_node(parse_macro_input!(input as DeriveInput), args) +} diff --git a/native/libcst_derive/src/parenthesized_node.rs b/native/libcst_derive/src/parenthesized_node.rs new file mode 100644 index 00000000..edc4b380 --- /dev/null +++ b/native/libcst_derive/src/parenthesized_node.rs @@ -0,0 +1,130 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use proc_macro::TokenStream; +use quote::{quote, quote_spanned}; +use syn::{ + parse_quote, spanned::Spanned, Data, DataEnum, DeriveInput, Fields, FieldsUnnamed, Ident, +}; + +pub(crate) fn impl_parenthesized_node(ast: &DeriveInput, deflated: bool) -> TokenStream { + match &ast.data { + Data::Enum(e) => impl_enum(ast, e, deflated), + Data::Struct(_) => impl_struct(ast, deflated), + Data::Union(u) => quote_spanned! 
{ + u.union_token.span() => + compile_error!("Union type is not supported") + } + .into(), + } +} + +fn idents(deflated: bool) -> (Ident, Ident, Ident) { + let treyt: Ident = if deflated { + parse_quote!(ParenthesizedDeflatedNode) + } else { + parse_quote!(ParenthesizedNode) + }; + let leftparen: Ident = if deflated { + parse_quote!(DeflatedLeftParen) + } else { + parse_quote!(LeftParen) + }; + let rightparen: Ident = if deflated { + parse_quote!(DeflatedRightParen) + } else { + parse_quote!(RightParen) + }; + (treyt, leftparen, rightparen) +} + +fn impl_struct(ast: &DeriveInput, deflated: bool) -> TokenStream { + let ident = &ast.ident; + let generics = if deflated { + parse_quote!(<'r, 'a>) + } else { + ast.generics.clone() + }; + + let (treyt, leftparen, rightparen) = idents(deflated); + let gen = quote! { + impl#generics #treyt#generics for #ident #generics { + fn lpar(&self) -> &Vec<#leftparen#generics> { + &self.lpar + } + fn rpar(&self) -> &Vec<#rightparen#generics> { + &self.rpar + } + fn with_parens(self, left: #leftparen#generics, right: #rightparen#generics) -> Self { + let mut lpar = self.lpar; + let mut rpar = self.rpar; + lpar.insert(0, left); + rpar.push(right); + #[allow(clippy::needless_update)] + Self { lpar, rpar, ..self } + } + } + }; + gen.into() +} + +fn impl_enum(ast: &DeriveInput, e: &DataEnum, deflated: bool) -> TokenStream { + let mut varnames = vec![]; + for var in e.variants.iter() { + match &var.fields { + Fields::Named(n) => { + return quote_spanned! { + n.span() => + compile_error!("Named enum fields not supported") + } + .into() + } + f @ Fields::Unit => { + return quote_spanned! { + f.span() => + compile_error!("Empty enum variants not supported") + } + .into() + } + Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + if unnamed.len() > 1 { + return quote_spanned! 
{ + unnamed.span() => + compile_error!("Multiple unnamed fields not supported") + } + .into(); + } + varnames.push(&var.ident); + } + } + } + let ident = &ast.ident; + let generics = if deflated { + parse_quote!(<'r, 'a>) + } else { + ast.generics.clone() + }; + let (treyt, leftparen, rightparen) = idents(deflated); + let gen = quote! { + impl#generics #treyt#generics for #ident #generics { + fn lpar(&self) -> &Vec<#leftparen#generics> { + match self { + #(Self::#varnames(x) => x.lpar(),)* + } + } + fn rpar(&self) -> &Vec<#rightparen#generics> { + match self { + #(Self::#varnames(x) => x.rpar(),)* + } + } + fn with_parens(self, left: #leftparen#generics, right: #rightparen#generics) -> Self { + match self { + #(Self::#varnames(x) => Self::#varnames(x.with_parens(left, right)),)* + } + } + } + }; + gen.into() +} diff --git a/native/libcst_derive/tests/pass/minimal_cst.rs b/native/libcst_derive/tests/pass/minimal_cst.rs new file mode 100644 index 00000000..104b2e11 --- /dev/null +++ b/native/libcst_derive/tests/pass/minimal_cst.rs @@ -0,0 +1,127 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use libcst_derive::{cst_node, Codegen}; + +pub enum Error {} + +type TokenRef<'r, 'a> = &'r &'a str; +pub type Result = std::result::Result; + +pub struct Config<'a> { + #[allow(dead_code)] + foo: &'a str, +} +pub trait Inflate<'a> +where + Self: Sized, +{ + type Inflated; + fn inflate(self, config: &Config<'a>) -> Result; +} + +impl<'a, T: Inflate<'a> + ?Sized> Inflate<'a> for Box { + type Inflated = Box; + fn inflate(self, config: &Config<'a>) -> Result { + match (*self).inflate(config) { + Ok(a) => Ok(Box::new(a)), + Err(e) => Err(e), + } + } +} + +pub struct CodegenState<'a> { + #[allow(dead_code)] + foo: &'a str, +} +pub trait Codegen<'a> { + fn codegen(&self, state: &mut CodegenState<'a>); +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct WS<'a> { + pub last_line: &'a str, +} + +#[cst_node] +pub struct Parameters<'a> { + pub params: Vec>, + pub foo: Param<'a>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedParameters<'r, 'a> { + type Inflated = Parameters<'a>; + fn inflate(self, config: &Config<'a>) -> Result { + let params = vec![]; + #[allow(clippy::blacklisted_name)] + let foo = self.foo.inflate(config)?; + Ok(Self::Inflated { params, foo }) + } +} + +#[cst_node] +pub struct Param<'a> { + pub star: Option<&'a str>, + pub(crate) star_tok: Option>, +} + +impl<'r, 'a> Inflate<'a> for DeflatedParam<'r, 'a> { + type Inflated = Param<'a>; + fn inflate(self, _config: &Config<'a>) -> Result { + Ok(Self::Inflated { star: self.star }) + } +} + +impl<'a> Codegen<'a> for Param<'a> { + fn codegen(&self, _state: &mut CodegenState<'a>) {} +} + +#[cst_node] +pub struct BitOr<'a> { + pub whitespace_before: WS<'a>, + pub whitespace_after: WS<'a>, + + pub(crate) tok: TokenRef<'a>, +} + +#[cst_node] +pub enum CompOp<'a> { + LessThan { + whitespace_before: WS<'a>, + tok: TokenRef<'a>, + }, + GreaterThan { + whitespace_after: WS<'a>, + tok: 
TokenRef<'a>, + }, +} + +impl<'r, 'a> Inflate<'a> for DeflatedCompOp<'r, 'a> { + type Inflated = CompOp<'a>; + fn inflate(self, _config: &Config<'a>) -> Result { + Ok(match self { + Self::LessThan { tok: _, .. } => Self::Inflated::LessThan { + whitespace_before: WS { last_line: "yo" }, + }, + Self::GreaterThan { tok: _, .. } => Self::Inflated::GreaterThan { + whitespace_after: WS { last_line: "" }, + }, + }) + } +} + +impl<'a> Codegen<'a> for CompOp<'a> { + fn codegen(&self, _state: &mut CodegenState<'a>) {} +} + +#[cst_node(Codegen)] +enum Expr<'a> { + #[allow(dead_code)] + One(Box>), + #[allow(dead_code)] + Two(CompOp<'a>), +} + +fn main() {} diff --git a/native/libcst_derive/tests/pass/simple.rs b/native/libcst_derive/tests/pass/simple.rs new file mode 100644 index 00000000..838f3914 --- /dev/null +++ b/native/libcst_derive/tests/pass/simple.rs @@ -0,0 +1,54 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree + +use libcst_derive::cst_node; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct WS<'a>(&'a str); + +type TokenRef<'r, 'a> = &'r &'a str; + +#[cst_node] +pub enum Foo<'a> { + One(One<'a>), + Two(Box>), +} + +#[cst_node] +pub struct One<'a> { + pub two: Box>, + pub header: WS<'a>, + + pub(crate) newline_tok: TokenRef<'a>, +} + +#[cst_node] +pub struct Two<'a> { + pub whitespace_before: WS<'a>, + pub(crate) tok: TokenRef<'a>, +} + +#[cst_node] +struct Thin<'a> { + pub whitespace: WS<'a>, +} + +#[cst_node] +struct Value<'a> { + pub value: &'a str, +} + +#[cst_node] +struct Empty {} + +#[cst_node] +enum Smol<'a> { + #[allow(dead_code)] + Thin(Thin<'a>), + #[allow(dead_code)] + Empty(Empty), +} + +fn main() {} diff --git a/native/roundtrip.sh b/native/roundtrip.sh new file mode 100755 index 00000000..c75241f7 --- /dev/null +++ b/native/roundtrip.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +PARSE=$(dirname $0)/target/release/parse + +exec diff -u "$1" <($PARSE < "$1") diff --git a/pyproject.toml b/pyproject.toml index c55f8d4e..f29b2474 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,131 @@ -[tool.black] -target-version = ["py36"] +[build-system] +requires = ["setuptools", "setuptools-scm", "setuptools-rust", "wheel"] -[tool.isort] -line_length = 88 -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -lines_after_imports = 2 -combine_as_imports = true +[project] +name = "libcst" +description = "A concrete syntax tree with AST-like properties for Python 3.0 through 3.14 programs." +readme = "README.rst" +dynamic = ["version"] +license = { file = "LICENSE" } +classifiers = [ + "License :: OSI Approved :: MIT License", + "Topic :: Software Development :: Libraries", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Programming Language :: Python :: Free Threading", + "Typing :: Typed", +] +requires-python = ">=3.9" +dependencies = [ + "pyyaml>=5.2; python_version < '3.13'", + "pyyaml-ft>=8.0.0; python_version == '3.13'", + "pyyaml>=6.0.3; python_version >= '3.14'", + "typing-extensions; python_version < '3.10'", +] + +[project.urls] +Documentation = "https://libcst.readthedocs.io/en/latest/" +Github = "https://github.com/Instagram/LibCST" +Changelog = "https://github.com/Instagram/LibCST/blob/main/CHANGELOG.md" + +[dependency-groups] +dev = [ + "black==25.1.0", + "coverage[toml]>=4.5.4", + "build>=0.10.0", + "fixit==2.1.0", + "flake8==7.2.0", + "hypothesis>=4.36.0", + "hypothesmith>=0.0.4", + "maturin>=1.7.0,<1.8", + "poethepoet>=0.35.0", + 
"prompt-toolkit>=2.0.9", + "pyre-check==0.9.18; platform_system != 'Windows'", + "setuptools_scm>=6.0.1", + "ufmt==2.8.0", + "usort==1.0.8.post1", + "setuptools-rust>=1.5.2", + "slotscheck>=0.7.1", +] +docs = [ + {include-group = "dev"}, + "Sphinx>=5.1.1", + "sphinx-rtd-theme>=0.4.3", + "jupyter>=1.0.0", + "nbsphinx>=0.4.2", + "jinja2==3.1.6", +] + +[tool.black] +target-version = ["py39"] +extend-exclude = '^/native/' # Prepend "^/" to specify root file/folder. See https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-format + +[tool.coverage.report] +fail_under = 93 +precision = 1 +show_missing = true +skip_covered = true +omit = ["*/_parser/*"] # temporary while I remove the parser + +[tool.uv] +cache-keys = [ + { file = "pyproject.toml" }, + { git = {commit = true, tags = true}}, + { file = "**/*.rs"}, + { file = "**/Cargo.toml"}, + { file = "**/Cargo.lock"}, +] + +[tool.poe.tasks] +fixtures = ["regenerate-fixtures", "_assert_no_changes"] +regenerate-fixtures = "python scripts/regenerate-fixtures.py" +_assert_no_changes = "git diff --exit-code" + +format = "ufmt format libcst scripts" +_flake8 = "flake8 libcst" +_ufmt = "ufmt check libcst scripts" +_slotscheck = "python -m slotscheck libcst" +_check_copyright = "python scripts/check_copyright.py" +lint = ["_flake8", "_ufmt", "_slotscheck", "_check_copyright"] +test = "python -m coverage run -m libcst.tests" +typecheck = "pyre check" +docs = "sphinx-build -ab html docs/source docs/build" + +[tool.slotscheck] +exclude-modules = '^libcst\.(testing|tests)' + +[tool.ufmt] +excludes = ["native/", "stubs/"] + +[tool.cibuildwheel] +build-verbosity = 1 +environment = { PATH = "$PATH:$HOME/.cargo/bin", LIBCST_NO_LOCAL_SCHEME="1" } +skip = [ + "pp*", + "*-win32", + "*-musllinux_i686", + "*-musllinux_ppc64le", + "*-musllinux_s390x", + "*-musllinux_armv7l", +] +enable = ["cpython-freethreading"] +test-command = [ + "python --version", + "python -m libcst.tool list", + # TODO: remove the 
gil once thread-safety issues are resolved + "python -X gil=1 -m libcst.tool codemod remove_unused_imports.RemoveUnusedImportsCommand {project}/libcst/_nodes", +] + +[tool.cibuildwheel.linux] +environment-pass = ["LIBCST_NO_LOCAL_SCHEME"] +before-all = "yum install -y libatomic; curl https://sh.rustup.rs -sSf | env -u CARGO_HOME sh -s -- --default-toolchain stable --profile minimal -y" + +[tool.cibuildwheel.macos] +before-all = "rustup target add aarch64-apple-darwin x86_64-apple-darwin" + +[tool.cibuildwheel.windows] +before-all = "rustup target add x86_64-pc-windows-msvc i686-pc-windows-msvc aarch64-pc-windows-msvc" diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index 2f86e9b3..00000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,15 +0,0 @@ -black==20.8b1 -codecov>=2.1.4 -coverage>=4.5.4 -fixit==0.1.1 -flake8>=3.7.8 -hypothesis>=4.36.0 -hypothesmith>=0.0.4 -git+https://github.com/jimmylai/sphinx.git@slots_type_annotation -isort==5.5.3 -jupyter>=1.0.0 -nbsphinx>=0.4.2 -pyre-check==0.0.41 -sphinx-rtd-theme>=0.4.3 -prompt-toolkit>=2.0.9 -tox>=3.18.1 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index cf8ff05c..00000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -dataclasses>=0.6.0; python_version < '3.7' -typing_extensions>=3.7.4.2 -typing_inspect>=0.4.0 -pyyaml>=5.2 diff --git a/scripts/check_copyright.py b/scripts/check_copyright.py new file mode 100644 index 00000000..47d90ec6 --- /dev/null +++ b/scripts/check_copyright.py @@ -0,0 +1,58 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import re +import sys +from pathlib import Path +from subprocess import run +from typing import Iterable, List, Pattern + +# Use the copyright header from this file as the benchmark for all files +EXPECTED_HEADER: str = "\n".join( + line for line in Path(__file__).read_text().splitlines()[:4] +) + +EXCEPTION_PATTERNS: List[Pattern[str]] = [ + re.compile(pattern) + for pattern in ( + r"^native/libcst/tests/fixtures/", + r"^libcst/_add_slots\.py$", + r"^libcst/tests/test_(e2e|fuzz)\.py$", + r"^libcst/_parser/base_parser\.py$", + r"^libcst/_parser/parso/utils\.py$", + r"^libcst/_parser/parso/pgen2/(generator|grammar_parser)\.py$", + r"^libcst/_parser/parso/python/(py_token|tokenize)\.py$", + r"^libcst/_parser/parso/tests/test_(fstring|tokenize|utils)\.py$", + ) +] + + +def tracked_files() -> Iterable[Path]: + proc = run( + ["git", "ls-tree", "-r", "--name-only", "HEAD"], + check=True, + capture_output=True, + encoding="utf-8", + ) + yield from ( + path + for line in proc.stdout.splitlines() + if not any(pattern.search(line) for pattern in EXCEPTION_PATTERNS) + if (path := Path(line)) and path.is_file() and path.suffix in (".py", ".sh") + ) + + +def main() -> None: + error = False + for path in tracked_files(): + content = path.read_text("utf-8") + if EXPECTED_HEADER not in content: + print(f"Missing or incomplete copyright in {path}") + error = True + sys.exit(1 if error else 0) + + +if __name__ == "__main__": + main() diff --git a/scripts/regenerate-fixtures.py b/scripts/regenerate-fixtures.py new file mode 100644 index 00000000..2b67b304 --- /dev/null +++ b/scripts/regenerate-fixtures.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +""" +Regenerate test fixtures, eg. 
after upgrading Pyre +""" + +import json +import os +from pathlib import Path +from subprocess import run + +from libcst.metadata import TypeInferenceProvider + + +def main() -> None: + CWD = Path.cwd() + repo_root = Path(__file__).parent.parent + test_root = repo_root / "libcst" / "tests" / "pyre" + + try: + os.chdir(test_root) + run(["pyre", "-n", "start", "--no-watchman"], check=True) + + for file_path in test_root.glob("*.py"): + json_path = file_path.with_suffix(".json") + print(f"generating {file_path} -> {json_path}") + + path_str = file_path.as_posix() + cache = TypeInferenceProvider.gen_cache(test_root, [path_str], timeout=None) + result = cache[path_str] + json_path.write_text(json.dumps(result, sort_keys=True, indent=2)) + + finally: + run(["pyre", "-n", "stop"], check=True) + os.chdir(CWD) + + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py index 4dd024dd..12180cbc 100644 --- a/setup.py +++ b/setup.py @@ -1,43 +1,28 @@ -# Copyright (c) Facebook, Inc. and its affiliates. +# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. - -import importlib.util -from os import path -from typing import TYPE_CHECKING +from os import environ import setuptools +from setuptools_rust import Binding, RustExtension -if TYPE_CHECKING: - from importlib.machinery import ModuleSpec - from types import ModuleType +def no_local_scheme(version: str) -> str: + return "" -# Grab the readme so that our package stays in sync with github. -this_directory: str = path.abspath(path.dirname(__file__)) -with open(path.join(this_directory, "README.rst"), encoding="utf-8") as f: - long_description = f.read() - -# Grab the version constant so that libcst.tool stays in sync with this package. 
-spec: "ModuleSpec" = importlib.util.spec_from_file_location( - "version", path.join(this_directory, "libcst/_version.py") -) -version: "ModuleType" = importlib.util.module_from_spec(spec) -# pyre-ignore Pyre doesn't know about importlib entirely. -spec.loader.exec_module(version) -# pyre-ignore Pyre has no way of knowing that this constant exists. -LIBCST_VERSION = version.LIBCST_VERSION setuptools.setup( - name="libcst", - description="A concrete syntax tree with AST-like properties for Python 3.5, 3.6, 3.7 and 3.8 programs.", - long_description=long_description, - long_description_content_type="text/x-rst", - version=LIBCST_VERSION, - url="https://github.com/Instagram/LibCST", - license="MIT", + setup_requires=["setuptools-rust", "setuptools_scm"], + use_scm_version={ + "write_to": "libcst/_version.py", + **( + {"local_scheme": no_local_scheme} + if "LIBCST_NO_LOCAL_SCHEME" in environ + else {} + ), + }, packages=setuptools.find_packages(), package_data={ "libcst": ["py.typed"], @@ -45,17 +30,12 @@ setuptools.setup( "libcst.codemod.tests": ["*"], }, test_suite="libcst", - python_requires=">=3.6", - install_requires=[dep.strip() for dep in open("requirements.txt").readlines()], - extras_require={ - "dev": [dep.strip() for dep in open("requirements-dev.txt").readlines() if "=" in dep], - }, - classifiers=[ - "License :: OSI Approved :: MIT License", - "Topic :: Software Development :: Libraries", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", + rust_extensions=[ + RustExtension( + "libcst.native", + path="native/libcst/Cargo.toml", + binding=Binding.PyO3, + ) ], zip_safe=False, # for mypy compatibility https://mypy.readthedocs.io/en/latest/installed_packages.html ) diff --git a/stubs/hypothesis.pyi b/stubs/hypothesis.pyi index 0568b4d1..a8c27975 100644 --- a/stubs/hypothesis.pyi +++ b/stubs/hypothesis.pyi @@ -1 +1,5 @@ -# pyre-placeholder-stub +# pyre-unsafe + +from typing import Any 
+ +def __getattr__(name: str) -> Any: ... diff --git a/stubs/hypothesmith.pyi b/stubs/hypothesmith.pyi index 0568b4d1..a8c27975 100644 --- a/stubs/hypothesmith.pyi +++ b/stubs/hypothesmith.pyi @@ -1 +1,5 @@ -# pyre-placeholder-stub +# pyre-unsafe + +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/stubs/libcst/native.pyi b/stubs/libcst/native.pyi new file mode 100644 index 00000000..2a84d6a7 --- /dev/null +++ b/stubs/libcst/native.pyi @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional +import libcst + +def parse_module(source: str, encoding: Optional[str]) -> libcst.Module: ... +def parse_expression(source: str) -> libcst.BaseExpression: ... +def parse_statement(source: str) -> libcst.BaseStatement: ... diff --git a/stubs/libcst_native/parser_config.pyi b/stubs/libcst_native/parser_config.pyi new file mode 100644 index 00000000..1a095cfc --- /dev/null +++ b/stubs/libcst_native/parser_config.pyi @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, FrozenSet, Mapping, Sequence + +from libcst._parser.parso.utils import PythonVersionInfo + +class BaseWhitespaceParserConfig: + def __new__( + cls, + *, + lines: Sequence[str], + default_newline: str, + ) -> BaseWhitespaceParserConfig: ... + lines: Sequence[str] + default_newline: str + +class ParserConfig(BaseWhitespaceParserConfig): + def __new__( + cls, + *, + lines: Sequence[str], + encoding: str, + default_indent: str, + default_newline: str, + has_trailing_newline: bool, + version: PythonVersionInfo, + future_imports: FrozenSet[str], + ) -> BaseWhitespaceParserConfig: ... 
+ # lines is inherited + encoding: str + default_indent: str + # default_newline is inherited + has_trailing_newline: bool + version: PythonVersionInfo + future_imports: FrozenSet[str] + +def parser_config_asdict(config: ParserConfig) -> Mapping[str, Any]: ... diff --git a/stubs/libcst_native/token_type.pyi b/stubs/libcst_native/token_type.pyi new file mode 100644 index 00000000..b51e8b48 --- /dev/null +++ b/stubs/libcst_native/token_type.pyi @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +class TokenType: + name: str + contains_syntax: bool + +STRING: TokenType = ... +NAME: TokenType = ... +NUMBER: TokenType = ... +OP: TokenType = ... +NEWLINE: TokenType = ... +INDENT: TokenType = ... +DEDENT: TokenType = ... +ASYNC: TokenType = ... +AWAIT: TokenType = ... +FSTRING_START: TokenType = ... +FSTRING_STRING: TokenType = ... +FSTRING_END: TokenType = ... +ENDMARKER: TokenType = ... +# unused dummy tokens for backwards compat with the parso tokenizer +ERRORTOKEN: TokenType = ... +ERROR_DEDENT: TokenType = ... diff --git a/stubs/libcst_native/tokenize.pyi b/stubs/libcst_native/tokenize.pyi new file mode 100644 index 00000000..12270da5 --- /dev/null +++ b/stubs/libcst_native/tokenize.pyi @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Iterator, Optional, Tuple + +from libcst_native import token_type, whitespace_state + +class Token: + def __new__( + cls, + type: token_type.TokenType, + string: str, + start_pos: Tuple[int, int], + end_pos: Tuple[int, int], + whitespace_before: whitespace_state.WhitespaceState, + whitespace_after: whitespace_state.WhitespaceState, + relative_indent: Optional[str], + ) -> Token: ... 
+ type: token_type.TokenType + string: str + start_pos: Tuple[int, int] + end_pos: Tuple[int, int] + whitespace_before: whitespace_state.WhitespaceState + whitespace_after: whitespace_state.WhitespaceState + relative_indent: Optional[str] + +def tokenize(text: str) -> Iterator[Token]: ... diff --git a/stubs/libcst_native/whitespace_parser.pyi b/stubs/libcst_native/whitespace_parser.pyi new file mode 100644 index 00000000..7c60189b --- /dev/null +++ b/stubs/libcst_native/whitespace_parser.pyi @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Sequence, Union + +from libcst._nodes.whitespace import ( + EmptyLine, + Newline, + ParenthesizedWhitespace, + SimpleWhitespace, + TrailingWhitespace, +) +from libcst._parser.types.config import BaseWhitespaceParserConfig as Config +from libcst._parser.types.whitespace_state import WhitespaceState as State + +def parse_simple_whitespace(config: Config, state: State) -> SimpleWhitespace: ... +def parse_empty_lines( + config: Config, + state: State, + *, + override_absolute_indent: Optional[str] = None, +) -> Sequence[EmptyLine]: ... +def parse_trailing_whitespace(config: Config, state: State) -> TrailingWhitespace: ... +def parse_parenthesizable_whitespace( + config: Config, state: State +) -> Union[SimpleWhitespace, ParenthesizedWhitespace]: ... diff --git a/stubs/libcst_native/whitespace_state.pyi b/stubs/libcst_native/whitespace_state.pyi new file mode 100644 index 00000000..75264a14 --- /dev/null +++ b/stubs/libcst_native/whitespace_state.pyi @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +class WhitespaceState: + def __new__( + cls, line: int, column: int, absolute_indent: str, is_parenthesized: bool + ) -> WhitespaceState: ... + line: int # one-indexed (to match parso's behavior) + column: int # zero-indexed (to match parso's behavior) + # What to look for when executing `_parse_indent`. + absolute_indent: str + is_parenthesized: bool diff --git a/stubs/setuptools.pyi b/stubs/setuptools.pyi index 0568b4d1..a8c27975 100644 --- a/stubs/setuptools.pyi +++ b/stubs/setuptools.pyi @@ -1 +1,5 @@ -# pyre-placeholder-stub +# pyre-unsafe + +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/stubs/tokenize.pyi b/stubs/tokenize.pyi index 284dbf36..32347ccc 100644 --- a/stubs/tokenize.pyi +++ b/stubs/tokenize.pyi @@ -66,7 +66,6 @@ from token import ( ) from typing import Callable, Generator, Sequence, Tuple - Hexnumber: str = ... Binnumber: str = ... Octnumber: str = ... @@ -83,7 +82,6 @@ Comment: str = ... Ignore: str = ... Name: str = ... -# pyre-ignore Pyre doesn't like that we're subclassing from tuple here class TokenInfo(Tuple[int, str, Tuple[int, int], Tuple[int, int], int]): exact_type: int = ... type: int = ... diff --git a/stubs/typing_inspect.pyi b/stubs/typing_inspect.pyi index 0568b4d1..a8c27975 100644 --- a/stubs/typing_inspect.pyi +++ b/stubs/typing_inspect.pyi @@ -1 +1,5 @@ -# pyre-placeholder-stub +# pyre-unsafe + +from typing import Any + +def __getattr__(name: str) -> Any: ... 
diff --git a/tox.ini b/tox.ini deleted file mode 100644 index a9947c2a..00000000 --- a/tox.ini +++ /dev/null @@ -1,87 +0,0 @@ -[tox] -envlist = py36, py37, py38, py39, lint, docs - -[testenv] -deps = - -rrequirements.txt - -rrequirements-dev.txt -commands = - python -m unittest {posargs} - -[testenv:lint] -deps = - -rrequirements.txt - -rrequirements-dev.txt -commands = - flake8 {posargs} - isort --check-only {posargs:.} - black --check {posargs:libcst/} - python3 -m fixit.cli.run_rules - -[testenv:docs] -deps = - -rrequirements.txt - -rrequirements-dev.txt -commands = - sphinx-build {posargs:docs/source/ docs/build/} - -[testenv:autofix] -deps = - -rrequirements.txt - -rrequirements-dev.txt -commands = - flake8 {posargs} - isort -q {posargs:.} - black {posargs:libcst/} - python3 -m fixit.cli.apply_fix - -[testenv:coverage] -deps = - -rrequirements.txt - -rrequirements-dev.txt -passenv = - CI - CIRCLECI - CIRCLE_* -commands = - coverage run setup.py test - codecov - -[testenv:fuzz36] -basepython = python3.6 -deps = - -rrequirements.txt - -rrequirements-dev.txt -setenv = - HYPOTHESIS = 1 -commands = - python3.6 -m unittest libcst/tests/test_fuzz.py - -[testenv:fuzz37] -basepython = python3.7 -deps = - -rrequirements.txt - -rrequirements-dev.txt -setenv = - HYPOTHESIS = 1 -commands = - python3.7 -m unittest libcst/tests/test_fuzz.py - -[testenv:fuzz38] -basepython = python3.8 -deps = - -rrequirements.txt - -rrequirements-dev.txt -setenv = - HYPOTHESIS = 1 -commands = - python3.8 -m unittest libcst/tests/test_fuzz.py - -[testenv:codegen] -deps = - -rrequirements.txt - -rrequirements-dev.txt -commands = - python3 -m libcst.codegen.generate visitors - python3 -m libcst.codegen.generate return_types - python3 -m libcst.codegen.generate matchers diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..f271e1ea --- /dev/null +++ b/uv.lock @@ -0,0 +1,2866 @@ +version = 1 +revision = 2 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version >= 
'3.14'", + "python_full_version == '3.13.*'", + "python_full_version >= '3.11' and python_full_version < '3.13'", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] + +[[package]] +name = "alabaster" +version = "0.7.16" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776, upload-time = "2024-01-10T00:56:10.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511, upload-time = "2024-01-10T00:56:08.388Z" }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version == '3.13.*'", + "python_full_version >= '3.11' and python_full_version < '3.13'", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210, upload-time = "2024-07-26T18:15:03.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = "2024-07-26T18:15:02.05Z" }, +] + +[[package]] +name = "anyio" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, +] + +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170, upload-time = "2024-02-06T09:43:11.258Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321, upload-time = "2024-02-06T09:43:09.663Z" }, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argon2-cffi-bindings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/89/ce5af8a7d472a67cc819d5d998aa8c82c5d860608c4db9f46f1162d7dab9/argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1", size = 45706, upload-time = "2025-06-03T06:55:32.073Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4f/d3/a8b22fa575b297cd6e3e3b0155c7e25db170edf1c74783d6a31a2490b8d9/argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741", size = 14657, upload-time = "2025-06-03T06:55:30.804Z" }, +] + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/e9/184b8ccce6683b0aa2fbb7ba5683ea4b9c5763f1356347f1312c32e3c66e/argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3", size = 1779911, upload-time = "2021-12-01T08:52:55.68Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d4/13/838ce2620025e9666aa8f686431f67a29052241692a3dd1ae9d3692a89d3/argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367", size = 29658, upload-time = "2021-12-01T09:09:17.016Z" }, + { url = "https://files.pythonhosted.org/packages/b3/02/f7f7bb6b6af6031edb11037639c697b912e1dea2db94d436e681aea2f495/argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d", size = 80583, upload-time = "2021-12-01T09:09:19.546Z" }, + { url = "https://files.pythonhosted.org/packages/ec/f7/378254e6dd7ae6f31fe40c8649eea7d4832a42243acaf0f1fff9083b2bed/argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae", size = 86168, upload-time = "2021-12-01T09:09:21.445Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/f6/4a34a37a98311ed73bb80efe422fed95f2ac25a4cacc5ae1d7ae6a144505/argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c", size = 82709, upload-time = "2021-12-01T09:09:18.182Z" }, + { url = "https://files.pythonhosted.org/packages/74/2b/73d767bfdaab25484f7e7901379d5f8793cccbb86c6e0cbc4c1b96f63896/argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86", size = 83613, upload-time = "2021-12-01T09:09:22.741Z" }, + { url = "https://files.pythonhosted.org/packages/4f/fd/37f86deef67ff57c76f137a67181949c2d408077e2e3dd70c6c42912c9bf/argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f", size = 84583, upload-time = "2021-12-01T09:09:24.177Z" }, + { url = "https://files.pythonhosted.org/packages/6f/52/5a60085a3dae8fded8327a4f564223029f5f54b0cb0455a31131b5363a01/argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e", size = 88475, upload-time = "2021-12-01T09:09:26.673Z" }, + { url = "https://files.pythonhosted.org/packages/8b/95/143cd64feb24a15fa4b189a3e1e7efbaeeb00f39a51e99b26fc62fbacabd/argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082", size = 27698, upload-time = "2021-12-01T09:09:27.87Z" }, + { url = "https://files.pythonhosted.org/packages/37/2c/e34e47c7dee97ba6f01a6203e0383e15b60fb85d78ac9a15cd066f6fe28b/argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f", size = 30817, upload-time = "2021-12-01T09:09:30.267Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/e4/bf8034d25edaa495da3c8a3405627d2e35758e44ff6eaa7948092646fdcc/argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93", size = 53104, upload-time = "2021-12-01T09:09:31.335Z" }, +] + +[[package]] +name = "arrow" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "types-python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/00/0f6e8fcdb23ea632c866620cc872729ff43ed91d284c866b515c6342b173/arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85", size = 131960, upload-time = "2023-09-30T22:11:18.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f8/ed/e97229a566617f2ae958a6b13e7cc0f585470eac730a73e9e82c32a3cdd2/arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80", size = 66419, upload-time = "2023-09-30T22:11:16.072Z" }, +] + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, +] + +[[package]] +name = "async-lru" +version = "2.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, 
+] +sdist = { url = "https://files.pythonhosted.org/packages/b2/4d/71ec4d3939dc755264f680f6c2b4906423a304c3d18e96853f0a595dfe97/async_lru-2.0.5.tar.gz", hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb", size = 10380, upload-time = "2025-03-16T17:25:36.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/49/d10027df9fce941cb8184e78a02857af36360d33e1721df81c5ed2179a1a/async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943", size = 6069, upload-time = "2025-03-16T17:25:35.422Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "babel" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + 
+[[package]] +name = "beautifulsoup4" +version = "4.13.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067, upload-time = "2025-04-15T17:05:13.836Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285, upload-time = "2025-04-15T17:05:12.221Z" }, +] + +[[package]] +name = "black" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "mypy-extensions" }, + { name = "packaging" }, + { name = "pathspec" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/49/26a7b0f3f35da4b5a65f081943b7bcd22d7002f5f0fb8098ec1ff21cb6ef/black-25.1.0.tar.gz", hash = "sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666", size = 649449, upload-time = "2025-01-29T04:15:40.373Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/3b/4ba3f93ac8d90410423fdd31d7541ada9bcee1df32fb90d26de41ed40e1d/black-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32", size = 
1629419, upload-time = "2025-01-29T05:37:06.642Z" }, + { url = "https://files.pythonhosted.org/packages/b4/02/0bde0485146a8a5e694daed47561785e8b77a0466ccc1f3e485d5ef2925e/black-25.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da", size = 1461080, upload-time = "2025-01-29T05:37:09.321Z" }, + { url = "https://files.pythonhosted.org/packages/52/0e/abdf75183c830eaca7589144ff96d49bce73d7ec6ad12ef62185cc0f79a2/black-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:055e59b198df7ac0b7efca5ad7ff2516bca343276c466be72eb04a3bcc1f82d7", size = 1766886, upload-time = "2025-01-29T04:18:24.432Z" }, + { url = "https://files.pythonhosted.org/packages/dc/a6/97d8bb65b1d8a41f8a6736222ba0a334db7b7b77b8023ab4568288f23973/black-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:db8ea9917d6f8fc62abd90d944920d95e73c83a5ee3383493e35d271aca872e9", size = 1419404, upload-time = "2025-01-29T04:19:04.296Z" }, + { url = "https://files.pythonhosted.org/packages/7e/4f/87f596aca05c3ce5b94b8663dbfe242a12843caaa82dd3f85f1ffdc3f177/black-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0", size = 1614372, upload-time = "2025-01-29T05:37:11.71Z" }, + { url = "https://files.pythonhosted.org/packages/e7/d0/2c34c36190b741c59c901e56ab7f6e54dad8df05a6272a9747ecef7c6036/black-25.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299", size = 1442865, upload-time = "2025-01-29T05:37:14.309Z" }, + { url = "https://files.pythonhosted.org/packages/21/d4/7518c72262468430ead45cf22bd86c883a6448b9eb43672765d69a8f1248/black-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096", size = 1749699, upload-time = "2025-01-29T04:18:17.688Z" }, + { 
url = "https://files.pythonhosted.org/packages/58/db/4f5beb989b547f79096e035c4981ceb36ac2b552d0ac5f2620e941501c99/black-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2", size = 1428028, upload-time = "2025-01-29T04:18:51.711Z" }, + { url = "https://files.pythonhosted.org/packages/83/71/3fe4741df7adf015ad8dfa082dd36c94ca86bb21f25608eb247b4afb15b2/black-25.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b", size = 1650988, upload-time = "2025-01-29T05:37:16.707Z" }, + { url = "https://files.pythonhosted.org/packages/13/f3/89aac8a83d73937ccd39bbe8fc6ac8860c11cfa0af5b1c96d081facac844/black-25.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc", size = 1453985, upload-time = "2025-01-29T05:37:18.273Z" }, + { url = "https://files.pythonhosted.org/packages/6f/22/b99efca33f1f3a1d2552c714b1e1b5ae92efac6c43e790ad539a163d1754/black-25.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f", size = 1783816, upload-time = "2025-01-29T04:18:33.823Z" }, + { url = "https://files.pythonhosted.org/packages/18/7e/a27c3ad3822b6f2e0e00d63d58ff6299a99a5b3aee69fa77cd4b0076b261/black-25.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba", size = 1440860, upload-time = "2025-01-29T04:19:12.944Z" }, + { url = "https://files.pythonhosted.org/packages/98/87/0edf98916640efa5d0696e1abb0a8357b52e69e82322628f25bf14d263d1/black-25.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f", size = 1650673, upload-time = "2025-01-29T05:37:20.574Z" }, + { url = 
"https://files.pythonhosted.org/packages/52/e5/f7bf17207cf87fa6e9b676576749c6b6ed0d70f179a3d812c997870291c3/black-25.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3", size = 1453190, upload-time = "2025-01-29T05:37:22.106Z" }, + { url = "https://files.pythonhosted.org/packages/e3/ee/adda3d46d4a9120772fae6de454c8495603c37c4c3b9c60f25b1ab6401fe/black-25.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171", size = 1782926, upload-time = "2025-01-29T04:18:58.564Z" }, + { url = "https://files.pythonhosted.org/packages/cc/64/94eb5f45dcb997d2082f097a3944cfc7fe87e071907f677e80788a2d7b7a/black-25.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18", size = 1442613, upload-time = "2025-01-29T04:19:27.63Z" }, + { url = "https://files.pythonhosted.org/packages/d3/b6/ae7507470a4830dbbfe875c701e84a4a5fb9183d1497834871a715716a92/black-25.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1ee0a0c330f7b5130ce0caed9936a904793576ef4d2b98c40835d6a65afa6a0", size = 1628593, upload-time = "2025-01-29T05:37:23.672Z" }, + { url = "https://files.pythonhosted.org/packages/24/c1/ae36fa59a59f9363017ed397750a0cd79a470490860bc7713967d89cdd31/black-25.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3df5f1bf91d36002b0a75389ca8663510cf0531cca8aa5c1ef695b46d98655f", size = 1460000, upload-time = "2025-01-29T05:37:25.829Z" }, + { url = "https://files.pythonhosted.org/packages/ac/b6/98f832e7a6c49aa3a464760c67c7856363aa644f2f3c74cf7d624168607e/black-25.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6827d563a2c820772b32ce8a42828dc6790f095f441beef18f96aa6f8294e", size = 1765963, upload-time = "2025-01-29T04:18:38.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/e9/2cb0a017eb7024f70e0d2e9bdb8c5a5b078c5740c7f8816065d06f04c557/black-25.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:bacabb307dca5ebaf9c118d2d2f6903da0d62c9faa82bd21a33eecc319559355", size = 1419419, upload-time = "2025-01-29T04:18:30.191Z" }, + { url = "https://files.pythonhosted.org/packages/09/71/54e999902aed72baf26bca0d50781b01838251a462612966e9fc4891eadd/black-25.1.0-py3-none-any.whl", hash = "sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717", size = 207646, upload-time = "2025-01-29T04:15:38.082Z" }, +] + +[[package]] +name = "bleach" +version = "6.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/9a/0e33f5054c54d349ea62c277191c020c2d6ef1d65ab2cb1993f91ec846d1/bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f", size = 203083, upload-time = "2024-10-29T18:30:40.477Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/55/96142937f66150805c25c4d0f31ee4132fd33497753400734f9dfdcbdc66/bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e", size = 163406, upload-time = "2024-10-29T18:30:38.186Z" }, +] + +[package.optional-dependencies] +css = [ + { name = "tinycss2" }, +] + +[[package]] +name = "build" +version = "1.2.2.post1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "os_name == 'nt'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10.2'" }, + { name = "packaging" }, + { name = "pyproject-hooks" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/46/aeab111f8e06793e4f0e421fcad593d547fb8313b50990f31681ee2fb1ad/build-1.2.2.post1.tar.gz", hash = 
"sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7", size = 46701, upload-time = "2024-10-06T17:22:25.251Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/c2/80633736cd183ee4a62107413def345f7e6e3c01563dbca1417363cf957e/build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5", size = 22950, upload-time = "2024-10-06T17:22:23.299Z" }, +] + +[[package]] +name = "certifi" +version = "2025.4.26" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/9e/c05b3920a3b7d20d3d3310465f50348e5b3694f4f88c6daf736eef3024c4/certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6", size = 160705, upload-time = "2025-04-26T02:12:29.51Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/7e/3db2bd1b1f9e95f7cddca6d6e75e2f2bd9f51b1246e546d88addca0106bd/certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3", size = 159618, upload-time = "2025-04-26T02:12:27.662Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191, upload-time = "2024-09-04T20:43:30.027Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592, upload-time = "2024-09-04T20:43:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024, upload-time = "2024-09-04T20:43:34.186Z" }, + { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188, upload-time = "2024-09-04T20:43:36.286Z" }, + { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571, upload-time = "2024-09-04T20:43:38.586Z" }, + { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687, upload-time = "2024-09-04T20:43:40.084Z" }, + { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211, upload-time = "2024-09-04T20:43:41.526Z" }, + { 
url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325, upload-time = "2024-09-04T20:43:43.117Z" }, + { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784, upload-time = "2024-09-04T20:43:45.256Z" }, + { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564, upload-time = "2024-09-04T20:43:46.779Z" }, + { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804, upload-time = "2024-09-04T20:43:48.186Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299, upload-time = "2024-09-04T20:43:49.812Z" }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", 
hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + 
{ url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = 
"https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, + { url = "https://files.pythonhosted.org/packages/b9/ea/8bb50596b8ffbc49ddd7a1ad305035daa770202a6b782fc164647c2673ad/cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", size = 182220, upload-time = "2024-09-04T20:45:01.577Z" }, + { url = "https://files.pythonhosted.org/packages/ae/11/e77c8cd24f58285a82c23af484cf5b124a376b32644e445960d1a4654c3a/cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", size = 178605, upload-time = "2024-09-04T20:45:03.837Z" }, + { url = "https://files.pythonhosted.org/packages/ed/65/25a8dc32c53bf5b7b6c2686b42ae2ad58743f7ff644844af7cdb29b49361/cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", size = 424910, upload-time = "2024-09-04T20:45:05.315Z" }, + { url = "https://files.pythonhosted.org/packages/42/7a/9d086fab7c66bd7c4d0f27c57a1b6b068ced810afc498cc8c49e0088661c/cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", size = 447200, upload-time = "2024-09-04T20:45:06.903Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/63/1785ced118ce92a993b0ec9e0d0ac8dc3e5dbfbcaa81135be56c69cabbb6/cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", size = 454565, upload-time = "2024-09-04T20:45:08.975Z" }, + { url = "https://files.pythonhosted.org/packages/74/06/90b8a44abf3556599cdec107f7290277ae8901a58f75e6fe8f970cd72418/cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", size = 435635, upload-time = "2024-09-04T20:45:10.64Z" }, + { url = "https://files.pythonhosted.org/packages/bd/62/a1f468e5708a70b1d86ead5bab5520861d9c7eacce4a885ded9faa7729c3/cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", size = 445218, upload-time = "2024-09-04T20:45:12.366Z" }, + { url = "https://files.pythonhosted.org/packages/5b/95/b34462f3ccb09c2594aa782d90a90b045de4ff1f70148ee79c69d37a0a5a/cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", size = 460486, upload-time = "2024-09-04T20:45:13.935Z" }, + { url = "https://files.pythonhosted.org/packages/fc/fc/a1e4bebd8d680febd29cf6c8a40067182b64f00c7d105f8f26b5bc54317b/cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", size = 437911, upload-time = "2024-09-04T20:45:15.696Z" }, + { url = "https://files.pythonhosted.org/packages/e6/c3/21cab7a6154b6a5ea330ae80de386e7665254835b9e98ecc1340b3a7de9a/cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", size = 460632, upload-time = "2024-09-04T20:45:17.284Z" }, + { url = 
"https://files.pythonhosted.org/packages/cb/b5/fd9f8b5a84010ca169ee49f4e4ad6f8c05f4e3545b72ee041dbbcb159882/cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", size = 171820, upload-time = "2024-09-04T20:45:18.762Z" }, + { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290, upload-time = "2024-09-04T20:45:20.226Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/28/9901804da60055b406e1a1c5ba7aac1276fb77f1dde635aabfc7fd84b8ab/charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941", size = 201818, upload-time = "2025-05-02T08:31:46.725Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9b/892a8c8af9110935e5adcbb06d9c6fe741b6bb02608c6513983048ba1a18/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd", size = 144649, upload-time = "2025-05-02T08:31:48.889Z" }, + { url = "https://files.pythonhosted.org/packages/7b/a5/4179abd063ff6414223575e008593861d62abfc22455b5d1a44995b7c101/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6", size = 155045, upload-time = 
"2025-05-02T08:31:50.757Z" }, + { url = "https://files.pythonhosted.org/packages/3b/95/bc08c7dfeddd26b4be8c8287b9bb055716f31077c8b0ea1cd09553794665/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d", size = 147356, upload-time = "2025-05-02T08:31:52.634Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/7a5b635aa65284bf3eab7653e8b4151ab420ecbae918d3e359d1947b4d61/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86", size = 149471, upload-time = "2025-05-02T08:31:56.207Z" }, + { url = "https://files.pythonhosted.org/packages/ae/38/51fc6ac74251fd331a8cfdb7ec57beba8c23fd5493f1050f71c87ef77ed0/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c", size = 151317, upload-time = "2025-05-02T08:31:57.613Z" }, + { url = "https://files.pythonhosted.org/packages/b7/17/edee1e32215ee6e9e46c3e482645b46575a44a2d72c7dfd49e49f60ce6bf/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0", size = 146368, upload-time = "2025-05-02T08:31:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/2c/ea3e66f2b5f21fd00b2825c94cafb8c326ea6240cd80a91eb09e4a285830/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef", size = 154491, upload-time = "2025-05-02T08:32:01.219Z" }, + { url = "https://files.pythonhosted.org/packages/52/47/7be7fa972422ad062e909fd62460d45c3ef4c141805b7078dbab15904ff7/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6", size = 157695, upload-time = "2025-05-02T08:32:03.045Z" }, + { url = "https://files.pythonhosted.org/packages/2f/42/9f02c194da282b2b340f28e5fb60762de1151387a36842a92b533685c61e/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366", size = 154849, upload-time = "2025-05-02T08:32:04.651Z" }, + { url = "https://files.pythonhosted.org/packages/67/44/89cacd6628f31fb0b63201a618049be4be2a7435a31b55b5eb1c3674547a/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db", size = 150091, upload-time = "2025-05-02T08:32:06.719Z" }, + { url = "https://files.pythonhosted.org/packages/1f/79/4b8da9f712bc079c0f16b6d67b099b0b8d808c2292c937f267d816ec5ecc/charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a", size = 98445, upload-time = "2025-05-02T08:32:08.66Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d7/96970afb4fb66497a40761cdf7bd4f6fca0fc7bafde3a84f836c1f57a926/charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509", size = 105782, upload-time = "2025-05-02T08:32:10.46Z" }, + { url = "https://files.pythonhosted.org/packages/05/85/4c40d00dcc6284a1c1ad5de5e0996b06f39d8232f1031cd23c2f5c07ee86/charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2", size = 198794, upload-time = "2025-05-02T08:32:11.945Z" }, + { url = "https://files.pythonhosted.org/packages/41/d9/7a6c0b9db952598e97e93cbdfcb91bacd89b9b88c7c983250a77c008703c/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645", size = 142846, upload-time = "2025-05-02T08:32:13.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/82/a37989cda2ace7e37f36c1a8ed16c58cf48965a79c2142713244bf945c89/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd", size = 153350, upload-time = "2025-05-02T08:32:15.873Z" }, + { url = "https://files.pythonhosted.org/packages/df/68/a576b31b694d07b53807269d05ec3f6f1093e9545e8607121995ba7a8313/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8", size = 145657, upload-time = "2025-05-02T08:32:17.283Z" }, + { url = "https://files.pythonhosted.org/packages/92/9b/ad67f03d74554bed3aefd56fe836e1623a50780f7c998d00ca128924a499/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f", size = 147260, upload-time = "2025-05-02T08:32:18.807Z" }, + { url = "https://files.pythonhosted.org/packages/a6/e6/8aebae25e328160b20e31a7e9929b1578bbdc7f42e66f46595a432f8539e/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7", size = 149164, upload-time = "2025-05-02T08:32:20.333Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f2/b3c2f07dbcc248805f10e67a0262c93308cfa149a4cd3d1fe01f593e5fd2/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9", size = 144571, upload-time = "2025-05-02T08:32:21.86Z" }, + { url = 
"https://files.pythonhosted.org/packages/60/5b/c3f3a94bc345bc211622ea59b4bed9ae63c00920e2e8f11824aa5708e8b7/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544", size = 151952, upload-time = "2025-05-02T08:32:23.434Z" }, + { url = "https://files.pythonhosted.org/packages/e2/4d/ff460c8b474122334c2fa394a3f99a04cf11c646da895f81402ae54f5c42/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82", size = 155959, upload-time = "2025-05-02T08:32:24.993Z" }, + { url = "https://files.pythonhosted.org/packages/a2/2b/b964c6a2fda88611a1fe3d4c400d39c66a42d6c169c924818c848f922415/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0", size = 153030, upload-time = "2025-05-02T08:32:26.435Z" }, + { url = "https://files.pythonhosted.org/packages/59/2e/d3b9811db26a5ebf444bc0fa4f4be5aa6d76fc6e1c0fd537b16c14e849b6/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5", size = 148015, upload-time = "2025-05-02T08:32:28.376Z" }, + { url = "https://files.pythonhosted.org/packages/90/07/c5fd7c11eafd561bb51220d600a788f1c8d77c5eef37ee49454cc5c35575/charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a", size = 98106, upload-time = "2025-05-02T08:32:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/a8/05/5e33dbef7e2f773d672b6d79f10ec633d4a71cd96db6673625838a4fd532/charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28", size = 105402, upload-time = "2025-05-02T08:32:32.191Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", 
size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/28/f8/dfb01ff6cc9af38552c69c9027501ff5a5117c4cc18dcd27cb5259fa1888/charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4", size = 201671, upload-time = "2025-05-02T08:34:12.696Z" }, + { url = "https://files.pythonhosted.org/packages/32/fb/74e26ee556a9dbfe3bd264289b67be1e6d616329403036f6507bb9f3f29c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7", size = 144744, upload-time = "2025-05-02T08:34:14.665Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/8499ee5aa7addc6f6d72e068691826ff093329fe59891e83b092ae4c851c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836", size = 154993, upload-time = "2025-05-02T08:34:17.134Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a2/5e4c187680728219254ef107a6949c60ee0e9a916a5dadb148c7ae82459c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597", size = 147382, upload-time = "2025-05-02T08:34:19.081Z" }, + { url = "https://files.pythonhosted.org/packages/4c/fe/56aca740dda674f0cc1ba1418c4d84534be51f639b5f98f538b332dc9a95/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7", size = 149536, upload-time = "2025-05-02T08:34:21.073Z" }, + { url = "https://files.pythonhosted.org/packages/53/13/db2e7779f892386b589173dd689c1b1e304621c5792046edd8a978cbf9e0/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f", size = 151349, upload-time = "2025-05-02T08:34:23.193Z" }, + { url = "https://files.pythonhosted.org/packages/69/35/e52ab9a276186f729bce7a0638585d2982f50402046e4b0faa5d2c3ef2da/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba", size = 146365, upload-time = "2025-05-02T08:34:25.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d8/af7333f732fc2e7635867d56cb7c349c28c7094910c72267586947561b4b/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12", size = 154499, upload-time = "2025-05-02T08:34:27.359Z" }, + { url = "https://files.pythonhosted.org/packages/7a/3d/a5b2e48acef264d71e036ff30bcc49e51bde80219bb628ba3e00cf59baac/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518", size = 157735, upload-time = "2025-05-02T08:34:29.798Z" }, + { url = "https://files.pythonhosted.org/packages/85/d8/23e2c112532a29f3eef374375a8684a4f3b8e784f62b01da931186f43494/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5", size = 154786, upload-time = "2025-05-02T08:34:31.858Z" }, + { url = "https://files.pythonhosted.org/packages/c7/57/93e0169f08ecc20fe82d12254a200dfaceddc1c12a4077bf454ecc597e33/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3", size = 150203, upload-time = "2025-05-02T08:34:33.88Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9d/9bf2b005138e7e060d7ebdec7503d0ef3240141587651f4b445bdf7286c2/charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471", size = 98436, upload-time = "2025-05-02T08:34:35.907Z" }, + { url = "https://files.pythonhosted.org/packages/6d/24/5849d46cf4311bbf21b424c443b09b459f5b436b1558c04e45dbb7cc478b/charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e", size = 105772, upload-time = "2025-05-02T08:34:37.935Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version == '3.13.*'", + "python_full_version >= '3.11' and python_full_version < '3.13'", + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + 
+[[package]] +name = "comm" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/a8/fb783cb0abe2b5fded9f55e5703015cdf1c9c85b3669087c538dd15a6a86/comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e", size = 6210, upload-time = "2024-03-12T16:53:41.133Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/75/49e5bfe642f71f272236b5b2d2691cf915a7283cc0ceda56357b61daa538/comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3", size = 7180, upload-time = "2024-03-12T16:53:39.226Z" }, +] + +[[package]] +name = "coverage" +version = "7.8.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/07/998afa4a0ecdf9b1981ae05415dad2d4e7716e1b1f00abbd91691ac09ac9/coverage-7.8.2.tar.gz", hash = "sha256:a886d531373a1f6ff9fad2a2ba4a045b68467b779ae729ee0b3b10ac20033b27", size = 812759, upload-time = "2025-05-23T11:39:57.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/6b/7dd06399a5c0b81007e3a6af0395cd60e6a30f959f8d407d3ee04642e896/coverage-7.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bd8ec21e1443fd7a447881332f7ce9d35b8fbd2849e761bb290b584535636b0a", size = 211573, upload-time = "2025-05-23T11:37:47.207Z" }, + { url = "https://files.pythonhosted.org/packages/f0/df/2b24090820a0bac1412955fb1a4dade6bc3b8dcef7b899c277ffaf16916d/coverage-7.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c26c2396674816deaeae7ded0e2b42c26537280f8fe313335858ffff35019be", size = 212006, upload-time = "2025-05-23T11:37:50.289Z" }, + { url = "https://files.pythonhosted.org/packages/c5/c4/e4e3b998e116625562a872a342419652fa6ca73f464d9faf9f52f1aff427/coverage-7.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1aec326ed237e5880bfe69ad41616d333712c7937bcefc1343145e972938f9b3", size = 241128, upload-time = "2025-05-23T11:37:52.229Z" }, + { url = "https://files.pythonhosted.org/packages/b1/67/b28904afea3e87a895da850ba587439a61699bf4b73d04d0dfd99bbd33b4/coverage-7.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e818796f71702d7a13e50c70de2a1924f729228580bcba1607cccf32eea46e6", size = 239026, upload-time = "2025-05-23T11:37:53.846Z" }, + { url = "https://files.pythonhosted.org/packages/8c/0f/47bf7c5630d81bc2cd52b9e13043685dbb7c79372a7f5857279cc442b37c/coverage-7.8.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:546e537d9e24efc765c9c891328f30f826e3e4808e31f5d0f87c4ba12bbd1622", size = 240172, upload-time = "2025-05-23T11:37:55.711Z" }, + { url = "https://files.pythonhosted.org/packages/ba/38/af3eb9d36d85abc881f5aaecf8209383dbe0fa4cac2d804c55d05c51cb04/coverage-7.8.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab9b09a2349f58e73f8ebc06fac546dd623e23b063e5398343c5270072e3201c", size = 240086, upload-time = "2025-05-23T11:37:57.724Z" }, + { url = "https://files.pythonhosted.org/packages/9e/64/c40c27c2573adeba0fe16faf39a8aa57368a1f2148865d6bb24c67eadb41/coverage-7.8.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fd51355ab8a372d89fb0e6a31719e825cf8df8b6724bee942fb5b92c3f016ba3", size = 238792, upload-time = "2025-05-23T11:37:59.737Z" }, + { url = "https://files.pythonhosted.org/packages/8e/ab/b7c85146f15457671c1412afca7c25a5696d7625e7158002aa017e2d7e3c/coverage-7.8.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0774df1e093acb6c9e4d58bce7f86656aeed6c132a16e2337692c12786b32404", size = 239096, upload-time = "2025-05-23T11:38:01.693Z" }, + { url = "https://files.pythonhosted.org/packages/d3/50/9446dad1310905fb1dc284d60d4320a5b25d4e3e33f9ea08b8d36e244e23/coverage-7.8.2-cp310-cp310-win32.whl", hash = 
"sha256:00f2e2f2e37f47e5f54423aeefd6c32a7dbcedc033fcd3928a4f4948e8b96af7", size = 214144, upload-time = "2025-05-23T11:38:03.68Z" }, + { url = "https://files.pythonhosted.org/packages/23/ed/792e66ad7b8b0df757db8d47af0c23659cdb5a65ef7ace8b111cacdbee89/coverage-7.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:145b07bea229821d51811bf15eeab346c236d523838eda395ea969d120d13347", size = 215043, upload-time = "2025-05-23T11:38:05.217Z" }, + { url = "https://files.pythonhosted.org/packages/6a/4d/1ff618ee9f134d0de5cc1661582c21a65e06823f41caf801aadf18811a8e/coverage-7.8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b99058eef42e6a8dcd135afb068b3d53aff3921ce699e127602efff9956457a9", size = 211692, upload-time = "2025-05-23T11:38:08.485Z" }, + { url = "https://files.pythonhosted.org/packages/96/fa/c3c1b476de96f2bc7a8ca01a9f1fcb51c01c6b60a9d2c3e66194b2bdb4af/coverage-7.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5feb7f2c3e6ea94d3b877def0270dff0947b8d8c04cfa34a17be0a4dc1836879", size = 212115, upload-time = "2025-05-23T11:38:09.989Z" }, + { url = "https://files.pythonhosted.org/packages/f7/c2/5414c5a1b286c0f3881ae5adb49be1854ac5b7e99011501f81c8c1453065/coverage-7.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:670a13249b957bb9050fab12d86acef7bf8f6a879b9d1a883799276e0d4c674a", size = 244740, upload-time = "2025-05-23T11:38:11.947Z" }, + { url = "https://files.pythonhosted.org/packages/cd/46/1ae01912dfb06a642ef3dd9cf38ed4996fda8fe884dab8952da616f81a2b/coverage-7.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bdc8bf760459a4a4187b452213e04d039990211f98644c7292adf1e471162b5", size = 242429, upload-time = "2025-05-23T11:38:13.955Z" }, + { url = "https://files.pythonhosted.org/packages/06/58/38c676aec594bfe2a87c7683942e5a30224791d8df99bcc8439fde140377/coverage-7.8.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:07a989c867986c2a75f158f03fdb413128aad29aca9d4dbce5fc755672d96f11", size = 244218, upload-time = "2025-05-23T11:38:15.631Z" }, + { url = "https://files.pythonhosted.org/packages/80/0c/95b1023e881ce45006d9abc250f76c6cdab7134a1c182d9713878dfefcb2/coverage-7.8.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2db10dedeb619a771ef0e2949ccba7b75e33905de959c2643a4607bef2f3fb3a", size = 243865, upload-time = "2025-05-23T11:38:17.622Z" }, + { url = "https://files.pythonhosted.org/packages/57/37/0ae95989285a39e0839c959fe854a3ae46c06610439350d1ab860bf020ac/coverage-7.8.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e6ea7dba4e92926b7b5f0990634b78ea02f208d04af520c73a7c876d5a8d36cb", size = 242038, upload-time = "2025-05-23T11:38:19.966Z" }, + { url = "https://files.pythonhosted.org/packages/4d/82/40e55f7c0eb5e97cc62cbd9d0746fd24e8caf57be5a408b87529416e0c70/coverage-7.8.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ef2f22795a7aca99fc3c84393a55a53dd18ab8c93fb431004e4d8f0774150f54", size = 242567, upload-time = "2025-05-23T11:38:21.912Z" }, + { url = "https://files.pythonhosted.org/packages/f9/35/66a51adc273433a253989f0d9cc7aa6bcdb4855382cf0858200afe578861/coverage-7.8.2-cp311-cp311-win32.whl", hash = "sha256:641988828bc18a6368fe72355df5f1703e44411adbe49bba5644b941ce6f2e3a", size = 214194, upload-time = "2025-05-23T11:38:23.571Z" }, + { url = "https://files.pythonhosted.org/packages/f6/8f/a543121f9f5f150eae092b08428cb4e6b6d2d134152c3357b77659d2a605/coverage-7.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:8ab4a51cb39dc1933ba627e0875046d150e88478dbe22ce145a68393e9652975", size = 215109, upload-time = "2025-05-23T11:38:25.137Z" }, + { url = "https://files.pythonhosted.org/packages/77/65/6cc84b68d4f35186463cd7ab1da1169e9abb59870c0f6a57ea6aba95f861/coverage-7.8.2-cp311-cp311-win_arm64.whl", hash = "sha256:8966a821e2083c74d88cca5b7dcccc0a3a888a596a04c0b9668a891de3a0cc53", size = 213521, upload-time = "2025-05-23T11:38:27.123Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/2a/1da1ada2e3044fcd4a3254fb3576e160b8fe5b36d705c8a31f793423f763/coverage-7.8.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e2f6fe3654468d061942591aef56686131335b7a8325684eda85dacdf311356c", size = 211876, upload-time = "2025-05-23T11:38:29.01Z" }, + { url = "https://files.pythonhosted.org/packages/70/e9/3d715ffd5b6b17a8be80cd14a8917a002530a99943cc1939ad5bb2aa74b9/coverage-7.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76090fab50610798cc05241bf83b603477c40ee87acd358b66196ab0ca44ffa1", size = 212130, upload-time = "2025-05-23T11:38:30.675Z" }, + { url = "https://files.pythonhosted.org/packages/a0/02/fdce62bb3c21649abfd91fbdcf041fb99be0d728ff00f3f9d54d97ed683e/coverage-7.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bd0a0a5054be160777a7920b731a0570284db5142abaaf81bcbb282b8d99279", size = 246176, upload-time = "2025-05-23T11:38:32.395Z" }, + { url = "https://files.pythonhosted.org/packages/a7/52/decbbed61e03b6ffe85cd0fea360a5e04a5a98a7423f292aae62423b8557/coverage-7.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da23ce9a3d356d0affe9c7036030b5c8f14556bd970c9b224f9c8205505e3b99", size = 243068, upload-time = "2025-05-23T11:38:33.989Z" }, + { url = "https://files.pythonhosted.org/packages/38/6c/d0e9c0cce18faef79a52778219a3c6ee8e336437da8eddd4ab3dbd8fadff/coverage-7.8.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9392773cffeb8d7e042a7b15b82a414011e9d2b5fdbbd3f7e6a6b17d5e21b20", size = 245328, upload-time = "2025-05-23T11:38:35.568Z" }, + { url = "https://files.pythonhosted.org/packages/f0/70/f703b553a2f6b6c70568c7e398ed0789d47f953d67fbba36a327714a7bca/coverage-7.8.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:876cbfd0b09ce09d81585d266c07a32657beb3eaec896f39484b631555be0fe2", size = 245099, upload-time = "2025-05-23T11:38:37.627Z" 
}, + { url = "https://files.pythonhosted.org/packages/ec/fb/4cbb370dedae78460c3aacbdad9d249e853f3bc4ce5ff0e02b1983d03044/coverage-7.8.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3da9b771c98977a13fbc3830f6caa85cae6c9c83911d24cb2d218e9394259c57", size = 243314, upload-time = "2025-05-23T11:38:39.238Z" }, + { url = "https://files.pythonhosted.org/packages/39/9f/1afbb2cb9c8699b8bc38afdce00a3b4644904e6a38c7bf9005386c9305ec/coverage-7.8.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a990f6510b3292686713bfef26d0049cd63b9c7bb17e0864f133cbfd2e6167f", size = 244489, upload-time = "2025-05-23T11:38:40.845Z" }, + { url = "https://files.pythonhosted.org/packages/79/fa/f3e7ec7d220bff14aba7a4786ae47043770cbdceeea1803083059c878837/coverage-7.8.2-cp312-cp312-win32.whl", hash = "sha256:bf8111cddd0f2b54d34e96613e7fbdd59a673f0cf5574b61134ae75b6f5a33b8", size = 214366, upload-time = "2025-05-23T11:38:43.551Z" }, + { url = "https://files.pythonhosted.org/packages/54/aa/9cbeade19b7e8e853e7ffc261df885d66bf3a782c71cba06c17df271f9e6/coverage-7.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:86a323a275e9e44cdf228af9b71c5030861d4d2610886ab920d9945672a81223", size = 215165, upload-time = "2025-05-23T11:38:45.148Z" }, + { url = "https://files.pythonhosted.org/packages/c4/73/e2528bf1237d2448f882bbebaec5c3500ef07301816c5c63464b9da4d88a/coverage-7.8.2-cp312-cp312-win_arm64.whl", hash = "sha256:820157de3a589e992689ffcda8639fbabb313b323d26388d02e154164c57b07f", size = 213548, upload-time = "2025-05-23T11:38:46.74Z" }, + { url = "https://files.pythonhosted.org/packages/1a/93/eb6400a745ad3b265bac36e8077fdffcf0268bdbbb6c02b7220b624c9b31/coverage-7.8.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ea561010914ec1c26ab4188aef8b1567272ef6de096312716f90e5baa79ef8ca", size = 211898, upload-time = "2025-05-23T11:38:49.066Z" }, + { url = 
"https://files.pythonhosted.org/packages/1b/7c/bdbf113f92683024406a1cd226a199e4200a2001fc85d6a6e7e299e60253/coverage-7.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cb86337a4fcdd0e598ff2caeb513ac604d2f3da6d53df2c8e368e07ee38e277d", size = 212171, upload-time = "2025-05-23T11:38:51.207Z" }, + { url = "https://files.pythonhosted.org/packages/91/22/594513f9541a6b88eb0dba4d5da7d71596dadef6b17a12dc2c0e859818a9/coverage-7.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26a4636ddb666971345541b59899e969f3b301143dd86b0ddbb570bd591f1e85", size = 245564, upload-time = "2025-05-23T11:38:52.857Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f4/2860fd6abeebd9f2efcfe0fd376226938f22afc80c1943f363cd3c28421f/coverage-7.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5040536cf9b13fb033f76bcb5e1e5cb3b57c4807fef37db9e0ed129c6a094257", size = 242719, upload-time = "2025-05-23T11:38:54.529Z" }, + { url = "https://files.pythonhosted.org/packages/89/60/f5f50f61b6332451520e6cdc2401700c48310c64bc2dd34027a47d6ab4ca/coverage-7.8.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc67994df9bcd7e0150a47ef41278b9e0a0ea187caba72414b71dc590b99a108", size = 244634, upload-time = "2025-05-23T11:38:57.326Z" }, + { url = "https://files.pythonhosted.org/packages/3b/70/7f4e919039ab7d944276c446b603eea84da29ebcf20984fb1fdf6e602028/coverage-7.8.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e6c86888fd076d9e0fe848af0a2142bf606044dc5ceee0aa9eddb56e26895a0", size = 244824, upload-time = "2025-05-23T11:38:59.421Z" }, + { url = "https://files.pythonhosted.org/packages/26/45/36297a4c0cea4de2b2c442fe32f60c3991056c59cdc3cdd5346fbb995c97/coverage-7.8.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:684ca9f58119b8e26bef860db33524ae0365601492e86ba0b71d513f525e7050", size = 242872, upload-time = "2025-05-23T11:39:01.049Z" 
}, + { url = "https://files.pythonhosted.org/packages/a4/71/e041f1b9420f7b786b1367fa2a375703889ef376e0d48de9f5723fb35f11/coverage-7.8.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8165584ddedb49204c4e18da083913bdf6a982bfb558632a79bdaadcdafd0d48", size = 244179, upload-time = "2025-05-23T11:39:02.709Z" }, + { url = "https://files.pythonhosted.org/packages/bd/db/3c2bf49bdc9de76acf2491fc03130c4ffc51469ce2f6889d2640eb563d77/coverage-7.8.2-cp313-cp313-win32.whl", hash = "sha256:34759ee2c65362163699cc917bdb2a54114dd06d19bab860725f94ef45a3d9b7", size = 214393, upload-time = "2025-05-23T11:39:05.457Z" }, + { url = "https://files.pythonhosted.org/packages/c6/dc/947e75d47ebbb4b02d8babb1fad4ad381410d5bc9da7cfca80b7565ef401/coverage-7.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:2f9bc608fbafaee40eb60a9a53dbfb90f53cc66d3d32c2849dc27cf5638a21e3", size = 215194, upload-time = "2025-05-23T11:39:07.171Z" }, + { url = "https://files.pythonhosted.org/packages/90/31/a980f7df8a37eaf0dc60f932507fda9656b3a03f0abf188474a0ea188d6d/coverage-7.8.2-cp313-cp313-win_arm64.whl", hash = "sha256:9fe449ee461a3b0c7105690419d0b0aba1232f4ff6d120a9e241e58a556733f7", size = 213580, upload-time = "2025-05-23T11:39:08.862Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6a/25a37dd90f6c95f59355629417ebcb74e1c34e38bb1eddf6ca9b38b0fc53/coverage-7.8.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8369a7c8ef66bded2b6484053749ff220dbf83cba84f3398c84c51a6f748a008", size = 212734, upload-time = "2025-05-23T11:39:11.109Z" }, + { url = "https://files.pythonhosted.org/packages/36/8b/3a728b3118988725f40950931abb09cd7f43b3c740f4640a59f1db60e372/coverage-7.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:159b81df53a5fcbc7d45dae3adad554fdbde9829a994e15227b3f9d816d00b36", size = 212959, upload-time = "2025-05-23T11:39:12.751Z" }, + { url = 
"https://files.pythonhosted.org/packages/53/3c/212d94e6add3a3c3f412d664aee452045ca17a066def8b9421673e9482c4/coverage-7.8.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6fcbbd35a96192d042c691c9e0c49ef54bd7ed865846a3c9d624c30bb67ce46", size = 257024, upload-time = "2025-05-23T11:39:15.569Z" }, + { url = "https://files.pythonhosted.org/packages/a4/40/afc03f0883b1e51bbe804707aae62e29c4e8c8bbc365c75e3e4ddeee9ead/coverage-7.8.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05364b9cc82f138cc86128dc4e2e1251c2981a2218bfcd556fe6b0fbaa3501be", size = 252867, upload-time = "2025-05-23T11:39:17.64Z" }, + { url = "https://files.pythonhosted.org/packages/18/a2/3699190e927b9439c6ded4998941a3c1d6fa99e14cb28d8536729537e307/coverage-7.8.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46d532db4e5ff3979ce47d18e2fe8ecad283eeb7367726da0e5ef88e4fe64740", size = 255096, upload-time = "2025-05-23T11:39:19.328Z" }, + { url = "https://files.pythonhosted.org/packages/b4/06/16e3598b9466456b718eb3e789457d1a5b8bfb22e23b6e8bbc307df5daf0/coverage-7.8.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4000a31c34932e7e4fa0381a3d6deb43dc0c8f458e3e7ea6502e6238e10be625", size = 256276, upload-time = "2025-05-23T11:39:21.077Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d5/4b5a120d5d0223050a53d2783c049c311eea1709fa9de12d1c358e18b707/coverage-7.8.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:43ff5033d657cd51f83015c3b7a443287250dc14e69910577c3e03bd2e06f27b", size = 254478, upload-time = "2025-05-23T11:39:22.838Z" }, + { url = "https://files.pythonhosted.org/packages/ba/85/f9ecdb910ecdb282b121bfcaa32fa8ee8cbd7699f83330ee13ff9bbf1a85/coverage-7.8.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:94316e13f0981cbbba132c1f9f365cac1d26716aaac130866ca812006f662199", size = 255255, upload-time = 
"2025-05-23T11:39:24.644Z" }, + { url = "https://files.pythonhosted.org/packages/50/63/2d624ac7d7ccd4ebbd3c6a9eba9d7fc4491a1226071360d59dd84928ccb2/coverage-7.8.2-cp313-cp313t-win32.whl", hash = "sha256:3f5673888d3676d0a745c3d0e16da338c5eea300cb1f4ada9c872981265e76d8", size = 215109, upload-time = "2025-05-23T11:39:26.722Z" }, + { url = "https://files.pythonhosted.org/packages/22/5e/7053b71462e970e869111c1853afd642212568a350eba796deefdfbd0770/coverage-7.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:2c08b05ee8d7861e45dc5a2cc4195c8c66dca5ac613144eb6ebeaff2d502e73d", size = 216268, upload-time = "2025-05-23T11:39:28.429Z" }, + { url = "https://files.pythonhosted.org/packages/07/69/afa41aa34147655543dbe96994f8a246daf94b361ccf5edfd5df62ce066a/coverage-7.8.2-cp313-cp313t-win_arm64.whl", hash = "sha256:1e1448bb72b387755e1ff3ef1268a06617afd94188164960dba8d0245a46004b", size = 214071, upload-time = "2025-05-23T11:39:30.55Z" }, + { url = "https://files.pythonhosted.org/packages/71/1e/388267ad9c6aa126438acc1ceafede3bb746afa9872e3ec5f0691b7d5efa/coverage-7.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:496948261eaac5ac9cf43f5d0a9f6eb7a6d4cb3bedb2c5d294138142f5c18f2a", size = 211566, upload-time = "2025-05-23T11:39:32.333Z" }, + { url = "https://files.pythonhosted.org/packages/8f/a5/acc03e5cf0bba6357f5e7c676343de40fbf431bb1e115fbebf24b2f7f65e/coverage-7.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eacd2de0d30871eff893bab0b67840a96445edcb3c8fd915e6b11ac4b2f3fa6d", size = 211996, upload-time = "2025-05-23T11:39:34.512Z" }, + { url = "https://files.pythonhosted.org/packages/5b/a2/0fc0a9f6b7c24fa4f1d7210d782c38cb0d5e692666c36eaeae9a441b6755/coverage-7.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b039ffddc99ad65d5078ef300e0c7eed08c270dc26570440e3ef18beb816c1ca", size = 240741, upload-time = "2025-05-23T11:39:36.252Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/da/1c6ba2cf259710eed8916d4fd201dccc6be7380ad2b3b9f63ece3285d809/coverage-7.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e49824808d4375ede9dd84e9961a59c47f9113039f1a525e6be170aa4f5c34d", size = 238672, upload-time = "2025-05-23T11:39:38.03Z" }, + { url = "https://files.pythonhosted.org/packages/ac/51/c8fae0dc3ca421e6e2509503696f910ff333258db672800c3bdef256265a/coverage-7.8.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b069938961dfad881dc2f8d02b47645cd2f455d3809ba92a8a687bf513839787", size = 239769, upload-time = "2025-05-23T11:39:40.24Z" }, + { url = "https://files.pythonhosted.org/packages/59/8e/b97042ae92c59f40be0c989df090027377ba53f2d6cef73c9ca7685c26a6/coverage-7.8.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:de77c3ba8bb686d1c411e78ee1b97e6e0b963fb98b1637658dd9ad2c875cf9d7", size = 239555, upload-time = "2025-05-23T11:39:42.3Z" }, + { url = "https://files.pythonhosted.org/packages/47/35/b8893e682d6e96b1db2af5997fc13ef62219426fb17259d6844c693c5e00/coverage-7.8.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1676628065a498943bd3f64f099bb573e08cf1bc6088bbe33cf4424e0876f4b3", size = 237768, upload-time = "2025-05-23T11:39:44.069Z" }, + { url = "https://files.pythonhosted.org/packages/03/6c/023b0b9a764cb52d6243a4591dcb53c4caf4d7340445113a1f452bb80591/coverage-7.8.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8e1a26e7e50076e35f7afafde570ca2b4d7900a491174ca357d29dece5aacee7", size = 238757, upload-time = "2025-05-23T11:39:46.195Z" }, + { url = "https://files.pythonhosted.org/packages/03/ed/3af7e4d721bd61a8df7de6de9e8a4271e67f3d9e086454558fd9f48eb4f6/coverage-7.8.2-cp39-cp39-win32.whl", hash = "sha256:6782a12bf76fa61ad9350d5a6ef5f3f020b57f5e6305cbc663803f2ebd0f270a", size = 214166, upload-time = "2025-05-23T11:39:47.934Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/30/ee774b626773750dc6128354884652507df3c59d6aa8431526107e595227/coverage-7.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:1efa4166ba75ccefd647f2d78b64f53f14fb82622bc94c5a5cb0a622f50f1c9e", size = 215050, upload-time = "2025-05-23T11:39:50.252Z" }, + { url = "https://files.pythonhosted.org/packages/69/2f/572b29496d8234e4a7773200dd835a0d32d9e171f2d974f3fe04a9dbc271/coverage-7.8.2-pp39.pp310.pp311-none-any.whl", hash = "sha256:ec455eedf3ba0bbdf8f5a570012617eb305c63cb9f03428d39bf544cb2b94837", size = 203636, upload-time = "2025-05-23T11:39:52.002Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1a/0b9c32220ad694d66062f571cc5cedfa9997b64a591e8a500bb63de1bd40/coverage-7.8.2-py3-none-any.whl", hash = "sha256:726f32ee3713f7359696331a18daf0c3b3a70bb0ae71141b9d3c52be7c595e32", size = 203623, upload-time = "2025-05-23T11:39:53.846Z" }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "dataclasses-json" +version = "0.6.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "marshmallow" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227, upload-time = "2024-06-09T16:20:19.103Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686, upload-time = "2024-06-09T16:20:16.715Z" }, +] + +[[package]] +name = "debugpy" +version = "1.8.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/bd/75/087fe07d40f490a78782ff3b0a30e3968936854105487decdb33446d4b0e/debugpy-1.8.14.tar.gz", hash = "sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322", size = 1641444, upload-time = "2025-04-10T19:46:10.981Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/df/156df75a41aaebd97cee9d3870fe68f8001b6c1c4ca023e221cfce69bece/debugpy-1.8.14-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:93fee753097e85623cab1c0e6a68c76308cd9f13ffdf44127e6fab4fbf024339", size = 2076510, upload-time = "2025-04-10T19:46:13.315Z" }, + { url = "https://files.pythonhosted.org/packages/69/cd/4fc391607bca0996db5f3658762106e3d2427beaef9bfd363fd370a3c054/debugpy-1.8.14-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d937d93ae4fa51cdc94d3e865f535f185d5f9748efb41d0d49e33bf3365bd79", size = 3559614, upload-time = "2025-04-10T19:46:14.647Z" }, + { url = "https://files.pythonhosted.org/packages/1a/42/4e6d2b9d63e002db79edfd0cb5656f1c403958915e0e73ab3e9220012eec/debugpy-1.8.14-cp310-cp310-win32.whl", hash = "sha256:c442f20577b38cc7a9aafecffe1094f78f07fb8423c3dddb384e6b8f49fd2987", size = 5208588, upload-time = "2025-04-10T19:46:16.233Z" }, + { url = "https://files.pythonhosted.org/packages/97/b1/cc9e4e5faadc9d00df1a64a3c2d5c5f4b9df28196c39ada06361c5141f89/debugpy-1.8.14-cp310-cp310-win_amd64.whl", hash = "sha256:f117dedda6d969c5c9483e23f573b38f4e39412845c7bc487b6f2648df30fe84", size = 5241043, upload-time = "2025-04-10T19:46:17.768Z" }, + { url = "https://files.pythonhosted.org/packages/67/e8/57fe0c86915671fd6a3d2d8746e40485fd55e8d9e682388fbb3a3d42b86f/debugpy-1.8.14-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:1b2ac8c13b2645e0b1eaf30e816404990fbdb168e193322be8f545e8c01644a9", size = 2175064, upload-time = "2025-04-10T19:46:19.486Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/97/2b2fd1b1c9569c6764ccdb650a6f752e4ac31be465049563c9eb127a8487/debugpy-1.8.14-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf431c343a99384ac7eab2f763980724834f933a271e90496944195318c619e2", size = 3132359, upload-time = "2025-04-10T19:46:21.192Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ee/b825c87ed06256ee2a7ed8bab8fb3bb5851293bf9465409fdffc6261c426/debugpy-1.8.14-cp311-cp311-win32.whl", hash = "sha256:c99295c76161ad8d507b413cd33422d7c542889fbb73035889420ac1fad354f2", size = 5133269, upload-time = "2025-04-10T19:46:23.047Z" }, + { url = "https://files.pythonhosted.org/packages/d5/a6/6c70cd15afa43d37839d60f324213843174c1d1e6bb616bd89f7c1341bac/debugpy-1.8.14-cp311-cp311-win_amd64.whl", hash = "sha256:7816acea4a46d7e4e50ad8d09d963a680ecc814ae31cdef3622eb05ccacf7b01", size = 5158156, upload-time = "2025-04-10T19:46:24.521Z" }, + { url = "https://files.pythonhosted.org/packages/d9/2a/ac2df0eda4898f29c46eb6713a5148e6f8b2b389c8ec9e425a4a1d67bf07/debugpy-1.8.14-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84", size = 2501268, upload-time = "2025-04-10T19:46:26.044Z" }, + { url = "https://files.pythonhosted.org/packages/10/53/0a0cb5d79dd9f7039169f8bf94a144ad3efa52cc519940b3b7dde23bcb89/debugpy-1.8.14-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826", size = 4221077, upload-time = "2025-04-10T19:46:27.464Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d5/84e01821f362327bf4828728aa31e907a2eca7c78cd7c6ec062780d249f8/debugpy-1.8.14-cp312-cp312-win32.whl", hash = "sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f", size = 5255127, upload-time = "2025-04-10T19:46:29.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/16/1ed929d812c758295cac7f9cf3dab5c73439c83d9091f2d91871e648093e/debugpy-1.8.14-cp312-cp312-win_amd64.whl", hash = "sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f", size = 5297249, upload-time = "2025-04-10T19:46:31.538Z" }, + { url = "https://files.pythonhosted.org/packages/4d/e4/395c792b243f2367d84202dc33689aa3d910fb9826a7491ba20fc9e261f5/debugpy-1.8.14-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f", size = 2485676, upload-time = "2025-04-10T19:46:32.96Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f1/6f2ee3f991327ad9e4c2f8b82611a467052a0fb0e247390192580e89f7ff/debugpy-1.8.14-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15", size = 4217514, upload-time = "2025-04-10T19:46:34.336Z" }, + { url = "https://files.pythonhosted.org/packages/79/28/b9d146f8f2dc535c236ee09ad3e5ac899adb39d7a19b49f03ac95d216beb/debugpy-1.8.14-cp313-cp313-win32.whl", hash = "sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e", size = 5254756, upload-time = "2025-04-10T19:46:36.199Z" }, + { url = "https://files.pythonhosted.org/packages/e0/62/a7b4a57013eac4ccaef6977966e6bec5c63906dd25a86e35f155952e29a1/debugpy-1.8.14-cp313-cp313-win_amd64.whl", hash = "sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e", size = 5297119, upload-time = "2025-04-10T19:46:38.141Z" }, + { url = "https://files.pythonhosted.org/packages/85/6f/96ba96545f55b6a675afa08c96b42810de9b18c7ad17446bbec82762127a/debugpy-1.8.14-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:413512d35ff52c2fb0fd2d65e69f373ffd24f0ecb1fac514c04a668599c5ce7f", size = 2077696, upload-time = "2025-04-10T19:46:46.817Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/84/f378a2dd837d94de3c85bca14f1db79f8fcad7e20b108b40d59da56a6d22/debugpy-1.8.14-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c9156f7524a0d70b7a7e22b2e311d8ba76a15496fb00730e46dcdeedb9e1eea", size = 3554846, upload-time = "2025-04-10T19:46:48.72Z" }, + { url = "https://files.pythonhosted.org/packages/db/52/88824fe5d6893f59933f664c6e12783749ab537a2101baf5c713164d8aa2/debugpy-1.8.14-cp39-cp39-win32.whl", hash = "sha256:b44985f97cc3dd9d52c42eb59ee9d7ee0c4e7ecd62bca704891f997de4cef23d", size = 5209350, upload-time = "2025-04-10T19:46:50.284Z" }, + { url = "https://files.pythonhosted.org/packages/41/35/72e9399be24a04cb72cfe1284572c9fcd1d742c7fa23786925c18fa54ad8/debugpy-1.8.14-cp39-cp39-win_amd64.whl", hash = "sha256:b1528cfee6c1b1c698eb10b6b096c598738a8238822d218173d21c3086de8123", size = 5241852, upload-time = "2025-04-10T19:46:52.022Z" }, + { url = "https://files.pythonhosted.org/packages/97/1a/481f33c37ee3ac8040d3d51fc4c4e4e7e61cb08b8bc8971d6032acc2279f/debugpy-1.8.14-py2.py3-none-any.whl", hash = "sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20", size = 5256230, upload-time = "2025-04-10T19:46:54.077Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + +[[package]] +name = "defusedxml" 
+version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444, upload-time = "2024-04-23T18:57:18.24Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408, upload-time = "2024-04-23T18:57:14.835Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, +] + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693, upload-time = "2025-01-22T15:41:29.403Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702, upload-time = "2025-01-22T15:41:25.929Z" }, +] + +[[package]] +name = "fastjsonschema" +version = "2.21.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8b/50/4b769ce1ac4071a1ef6d86b1a3fb56cdc3a37615e8c5519e1af96cdac366/fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4", size = 373939, upload-time = "2024-12-02T10:55:15.133Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/2b/0817a2b257fe88725c25589d89aec060581aabf668707a8d03b2e9e0cb2a/fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667", size = 23924, upload-time = "2024-12-02T10:55:07.599Z" }, +] + +[[package]] +name = "fixit" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = 
"8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "libcst" }, + { name = "moreorless" }, + { name = "packaging" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "trailrunner" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/22/fc513f039c17024fde3fe2ebe3bc93e4972f7717694613b1bc109068bfc1/fixit-2.1.0.tar.gz", hash = "sha256:b31665cb6491d659d8dfef5a6078a7e9f786e299826636d03d6bd91b6f71e95b", size = 219817, upload-time = "2023-10-26T02:37:14.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/91/635a1d52f36a546449031c63e54220c8a71e898bcd9cbccfe1181fc1812c/fixit-2.1.0-py3-none-any.whl", hash = "sha256:76b286c0abb9d6a63e5c7d1b6673a041c4356e93d70472e94a9ad2c447da7753", size = 83583, upload-time = "2023-10-26T02:37:12.574Z" }, +] + +[[package]] +name = "flake8" +version = "7.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mccabe" }, + { name = "pycodestyle" }, + { name = "pyflakes" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e7/c4/5842fc9fc94584c455543540af62fd9900faade32511fab650e9891ec225/flake8-7.2.0.tar.gz", hash = "sha256:fa558ae3f6f7dbf2b4f22663e5343b6b6023620461f8d4ff2019ef4b5ee70426", size = 48177, upload-time = "2025-03-29T20:08:39.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/5c/0627be4c9976d56b1217cb5187b7504e7fd7d3503f8bfd312a04077bd4f7/flake8-7.2.0-py2.py3-none-any.whl", hash = "sha256:93b92ba5bdb60754a6da14fa3b93a9361fd00a59632ada61fd7b130436c40343", size = 57786, upload-time = "2025-03-29T20:08:37.902Z" }, +] + +[[package]] +name = "fqdn" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, 
upload-time = "2021-03-11T07:16:29.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = 
"idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "hypothesis" +version = "6.135.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "sortedcontainers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/59/7022ef95715701cd90ac0cf04582e3507492ab200f370fd7ef12d80dda75/hypothesis-6.135.4.tar.gz", hash = "sha256:c63f6fc56840558c5c5e2441dd91fad1709da60bde756b816d4b89944e50a52f", size = 451895, upload-time = "2025-06-09T02:31:38.766Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/d4/25b3a9f35199eb1904967ca3e6db4afd636911fa39695760b0afac84f38a/hypothesis-6.135.4-py3-none-any.whl", hash = "sha256:6a3b13ce35d43e14aaf6a6ca4cc411e5342be5d05b77977499d07cf6a61e6e71", size = 517950, upload-time = "2025-06-09T02:31:34.463Z" }, +] + +[package.optional-dependencies] +lark = [ + { name = "lark" }, +] + +[[package]] +name = "hypothesmith" +version = "0.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "hypothesis", extra = ["lark"] }, + { name = "libcst" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e3/f6/1a64114dee6c46985482c35bdbc12025db59973a0225eec47ac4d306030f/hypothesmith-0.3.3.tar.gz", hash = "sha256:96c14802d6c8e85d8975264176878db54b28d2ed921fdbfedc2e6b8ce3c81716", size = 25529, upload-time = 
"2024-02-16T20:21:24.511Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/bc/78dcf42c6eaaf7d628f061f1e533a596f5bca2a53be2b714adc5d370d48e/hypothesmith-0.3.3-py3-none-any.whl", hash = "sha256:fdb0172f9de97d09450da40da7da083fdd118bcd2f88b1a2289413d2d496b1b1", size = 19247, upload-time = "2024-02-16T20:20:47.059Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026, upload-time = "2022-07-01T12:21:05.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp", marker = "python_full_version < '3.11'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "intervaltree" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sortedcontainers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/fb/396d568039d21344639db96d940d40eb62befe704ef849b27949ded5c3bb/intervaltree-3.1.0.tar.gz", hash = "sha256:902b1b88936918f9b2a19e0e5eb7ccb430ae45cde4f39ea4b36932920d33952d", size = 32861, upload-time = "2020-08-03T08:01:11.392Z" } + +[[package]] +name = "ipykernel" +version = "6.29.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython", version = "8.18.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "ipython", version = "8.37.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "ipython", version = "9.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e9/5c/67594cb0c7055dc50814b21731c22a601101ea3b1b50a9a1b090e11f5d0f/ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215", size = 163367, upload-time = "2024-07-01T14:07:22.543Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/5c/368ae6c01c7628438358e6d337c19b05425727fbb221d2a3c4303c372f42/ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5", size = 117173, upload-time = "2024-07-01T14:07:19.603Z" }, +] + +[[package]] +name = "ipython" +version = "8.18.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, + { name = "decorator", marker = "python_full_version < '3.10'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.10'" }, + { name = "jedi", marker = "python_full_version < '3.10'" }, + { name = "matplotlib-inline", marker = "python_full_version < '3.10'" }, + { name = "pexpect", marker = "python_full_version < '3.10' and sys_platform != 'win32'" }, + { name = "prompt-toolkit", marker = "python_full_version < '3.10'" }, + { name = "pygments", marker = "python_full_version < '3.10'" }, + { name = "stack-data", marker = "python_full_version < '3.10'" }, + { name = "traitlets", marker = "python_full_version < '3.10'" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/b9/3ba6c45a6df813c09a48bac313c22ff83efa26cbb55011218d925a46e2ad/ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27", size = 5486330, upload-time = "2023-11-27T09:58:34.596Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/47/6b/d9fdcdef2eb6a23f391251fde8781c38d42acd82abe84d054cb74f7863b0/ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397", size = 808161, upload-time = "2023-11-27T09:58:30.538Z" }, +] + +[[package]] +name = "ipython" +version = "8.37.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version == '3.10.*' and sys_platform == 'win32'" }, + { name = "decorator", marker = "python_full_version == '3.10.*'" }, + { name = "exceptiongroup", marker = "python_full_version == '3.10.*'" }, + { name = "jedi", marker = "python_full_version == '3.10.*'" }, + { name = "matplotlib-inline", marker = "python_full_version == '3.10.*'" }, + { name = "pexpect", marker = "python_full_version == '3.10.*' and sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit", marker = "python_full_version == '3.10.*'" }, + { name = "pygments", marker = "python_full_version == '3.10.*'" }, + { name = "stack-data", marker = "python_full_version == '3.10.*'" }, + { name = "traitlets", marker = "python_full_version == '3.10.*'" }, + { name = "typing-extensions", marker = "python_full_version == '3.10.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/31/10ac88f3357fc276dc8a64e8880c82e80e7459326ae1d0a211b40abf6665/ipython-8.37.0.tar.gz", hash = "sha256:ca815841e1a41a1e6b73a0b08f3038af9b2252564d01fc405356d34033012216", size = 5606088, upload-time = "2025-05-31T16:39:09.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/d0/274fbf7b0b12643cbbc001ce13e6a5b1607ac4929d1b11c72460152c9fc3/ipython-8.37.0-py3-none-any.whl", hash = "sha256:ed87326596b878932dbcb171e3e698845434d8c61b8d8cd474bf663041a9dcf2", size = 831864, upload-time = "2025-05-31T16:39:06.38Z" }, +] + +[[package]] +name = "ipython" +version = 
"9.3.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version == '3.13.*'", + "python_full_version >= '3.11' and python_full_version < '3.13'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, + { name = "decorator", marker = "python_full_version >= '3.11'" }, + { name = "ipython-pygments-lexers", marker = "python_full_version >= '3.11'" }, + { name = "jedi", marker = "python_full_version >= '3.11'" }, + { name = "matplotlib-inline", marker = "python_full_version >= '3.11'" }, + { name = "pexpect", marker = "python_full_version >= '3.11' and sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit", marker = "python_full_version >= '3.11'" }, + { name = "pygments", marker = "python_full_version >= '3.11'" }, + { name = "stack-data", marker = "python_full_version >= '3.11'" }, + { name = "traitlets", marker = "python_full_version >= '3.11'" }, + { name = "typing-extensions", marker = "python_full_version == '3.11.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/09/4c7e06b96fbd203e06567b60fb41b06db606b6a82db6db7b2c85bb72a15c/ipython-9.3.0.tar.gz", hash = "sha256:79eb896f9f23f50ad16c3bc205f686f6e030ad246cc309c6279a242b14afe9d8", size = 4426460, upload-time = "2025-05-31T16:34:55.678Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/99/9ed3d52d00f1846679e3aa12e2326ac7044b5e7f90dc822b60115fa533ca/ipython-9.3.0-py3-none-any.whl", hash = "sha256:1a0b6dd9221a1f5dddf725b57ac0cb6fddc7b5f470576231ae9162b9b3455a04", size = 605320, upload-time = "2025-05-31T16:34:52.154Z" }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, +] + +[[package]] +name = "ipywidgets" +version = "8.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "comm" }, + { name = "ipython", version = "8.18.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "ipython", version = "8.37.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "ipython", version = "9.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "jupyterlab-widgets" }, + { name = "traitlets" }, + { name = "widgetsnbextension" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/48/d3dbac45c2814cb73812f98dd6b38bbcc957a4e7bb31d6ea9c03bf94ed87/ipywidgets-8.1.7.tar.gz", hash = "sha256:15f1ac050b9ccbefd45dccfbb2ef6bed0029d8278682d569d71b8dd96bee0376", size = 116721, upload-time = "2025-05-05T12:42:03.489Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/6a/9166369a2f092bd286d24e6307de555d63616e8ddb373ebad2b5635ca4cd/ipywidgets-8.1.7-py3-none-any.whl", hash = "sha256:764f2602d25471c213919b8a1997df04bef869251db4ca8efba1b76b1bd9f7bb", size = 139806, upload-time = "2025-05-05T12:41:56.833Z" }, +] + +[[package]] +name = "isoduration" +version = "20.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"arrow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/1a/3c8edc664e06e6bd06cce40c6b22da5f1429aa4224d0c590f3be21c91ead/isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9", size = 11649, upload-time = "2020-11-01T11:00:00.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/55/e5326141505c5d5e34c5e0935d2908a74e4561eca44108fbfb9c13d2911a/isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042", size = 11321, upload-time = "2020-11-01T10:59:58.02Z" }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = 
"sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "json5" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/be/c6c745ec4c4539b25a278b70e29793f10382947df0d9efba2fa09120895d/json5-0.12.0.tar.gz", hash = "sha256:0b4b6ff56801a1c7dc817b0241bca4ce474a0e6a163bfef3fc594d3fd263ff3a", size = 51907, upload-time = "2025-04-03T16:33:13.201Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/9f/3500910d5a98549e3098807493851eeef2b89cdd3032227558a104dfe926/json5-0.12.0-py3-none-any.whl", hash = "sha256:6d37aa6c08b0609f16e1ec5ff94697e2cbbfbad5ac112afa05794da9ab7810db", size = 36079, upload-time = "2025-04-03T16:33:11.927Z" }, +] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/d3/1cf5326b923a53515d8f3a2cd442e6d7e94fcc444716e879ea70a0ce3177/jsonschema-4.24.0.tar.gz", hash = 
"sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196", size = 353480, upload-time = "2025-05-26T18:48:10.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/3d/023389198f69c722d039351050738d6755376c8fd343e91dc493ea485905/jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d", size = 88709, upload-time = "2025-05-26T18:48:08.417Z" }, +] + +[package.optional-dependencies] +format-nongpl = [ + { name = "fqdn" }, + { name = "idna" }, + { name = "isoduration" }, + { name = "jsonpointer" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "uri-template" }, + { name = "webcolors" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/ce/46fbd9c8119cfc3581ee5643ea49464d168028cfb5caff5fc0596d0cf914/jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608", size = 15513, upload-time = "2025-04-23T12:34:07.418Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" }, +] + +[[package]] +name = "jupyter" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipywidgets" }, + { name = "jupyter-console" }, + { name = "jupyterlab" }, + { name = "nbconvert" }, + { name = "notebook" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/f3/af28ea964ab8bc1e472dba2e82627d36d470c51f5cd38c37502eeffaa25e/jupyter-1.1.1.tar.gz", hash = 
"sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a", size = 5714959, upload-time = "2024-08-30T07:15:48.299Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/64/285f20a31679bf547b75602702f7800e74dbabae36ef324f716c02804753/jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83", size = 2657, upload-time = "2024-08-30T07:15:47.045Z" }, +] + +[[package]] +name = "jupyter-client" +version = "8.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019, upload-time = "2024-09-17T10:44:17.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105, upload-time = "2024-09-17T10:44:15.218Z" }, +] + +[[package]] +name = "jupyter-console" +version = "6.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipython", version = "8.18.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "ipython", version = "8.37.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "ipython", version = "9.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "jupyter-client" }, + { name = 
"jupyter-core" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "pyzmq" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/2d/e2fd31e2fc41c14e2bcb6c976ab732597e907523f6b2420305f9fc7fdbdb/jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539", size = 34363, upload-time = "2023-03-06T14:13:31.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/77/71d78d58f15c22db16328a476426f7ac4a60d3a5a7ba3b9627ee2f7903d4/jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485", size = 24510, upload-time = "2023-03-06T14:13:28.229Z" }, +] + +[[package]] +name = "jupyter-core" +version = "5.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs" }, + { name = "pywin32", marker = "platform_python_implementation != 'PyPy' and sys_platform == 'win32'" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/1b/72906d554acfeb588332eaaa6f61577705e9ec752ddb486f302dafa292d9/jupyter_core-5.8.1.tar.gz", hash = "sha256:0a5f9706f70e64786b75acba995988915ebd4601c8a52e534a40b51c95f59941", size = 88923, upload-time = "2025-05-27T07:38:16.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/57/6bffd4b20b88da3800c5d691e0337761576ee688eb01299eae865689d2df/jupyter_core-5.8.1-py3-none-any.whl", hash = "sha256:c28d268fc90fb53f1338ded2eb410704c5449a358406e8a948b75706e24863d0", size = 28880, upload-time = "2025-05-27T07:38:15.137Z" }, +] + +[[package]] +name = "jupyter-events" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonschema", extra = ["format-nongpl"] }, + { name = "packaging" }, + { name = "python-json-logger" }, + { name = "pyyaml", version = "6.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" 
}, + { name = "pyyaml", version = "6.0.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, + { name = "referencing" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/c3/306d090461e4cf3cd91eceaff84bede12a8e52cd821c2d20c9a4fd728385/jupyter_events-0.12.0.tar.gz", hash = "sha256:fc3fce98865f6784c9cd0a56a20644fc6098f21c8c33834a8d9fe383c17e554b", size = 62196, upload-time = "2025-02-03T17:23:41.485Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/48/577993f1f99c552f18a0428731a755e06171f9902fa118c379eb7c04ea22/jupyter_events-0.12.0-py3-none-any.whl", hash = "sha256:6464b2fa5ad10451c3d35fabc75eab39556ae1e2853ad0c0cc31b656731a97fb", size = 19430, upload-time = "2025-02-03T17:23:38.643Z" }, +] + +[[package]] +name = "jupyter-lsp" +version = "2.2.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/b4/3200b0b09c12bc3b72d943d923323c398eff382d1dcc7c0dbc8b74630e40/jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001", size = 48741, upload-time = "2024-04-09T17:59:44.918Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/e0/7bd7cff65594fd9936e2f9385701e44574fc7d721331ff676ce440b14100/jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da", size = 69146, upload-time = "2024-04-09T17:59:43.388Z" }, +] + +[[package]] +name = "jupyter-server" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "argon2-cffi" }, + { name = "jinja2" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "jupyter-events" 
}, + { name = "jupyter-server-terminals" }, + { name = "nbconvert" }, + { name = "nbformat" }, + { name = "overrides" }, + { name = "packaging" }, + { name = "prometheus-client" }, + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pyzmq" }, + { name = "send2trash" }, + { name = "terminado" }, + { name = "tornado" }, + { name = "traitlets" }, + { name = "websocket-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/c8/ba2bbcd758c47f1124c4ca14061e8ce60d9c6fd537faee9534a95f83521a/jupyter_server-2.16.0.tar.gz", hash = "sha256:65d4b44fdf2dcbbdfe0aa1ace4a842d4aaf746a2b7b168134d5aaed35621b7f6", size = 728177, upload-time = "2025-05-12T16:44:46.245Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/1f/5ebbced977171d09a7b0c08a285ff9a20aafb9c51bde07e52349ff1ddd71/jupyter_server-2.16.0-py3-none-any.whl", hash = "sha256:3d8db5be3bc64403b1c65b400a1d7f4647a5ce743f3b20dbdefe8ddb7b55af9e", size = 386904, upload-time = "2025-05-12T16:44:43.335Z" }, +] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "terminado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/d5/562469734f476159e99a55426d697cbf8e7eb5efe89fb0e0b4f83a3d3459/jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269", size = 31430, upload-time = "2024-03-12T14:37:03.049Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/2d/2b32cdbe8d2a602f697a649798554e4f072115438e92249624e532e8aca6/jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa", size = 13656, upload-time = "2024-03-12T14:37:00.708Z" }, +] + +[[package]] +name = "jupyterlab" +version = "4.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "async-lru" 
}, + { name = "httpx" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "ipykernel" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyter-lsp" }, + { name = "jupyter-server" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d3/2d/d1678dcf2db66cb4a38a80d9e5fcf48c349f3ac12f2d38882993353ae768/jupyterlab-4.4.3.tar.gz", hash = "sha256:a94c32fd7f8b93e82a49dc70a6ec45a5c18281ca2a7228d12765e4e210e5bca2", size = 23032376, upload-time = "2025-05-26T11:18:00.996Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/4d/7dd5c2ffbb960930452a031dc8410746183c924580f2ab4e68ceb5b3043f/jupyterlab-4.4.3-py3-none-any.whl", hash = "sha256:164302f6d4b6c44773dfc38d585665a4db401a16e5296c37df5cba63904fbdea", size = 12295480, upload-time = "2025-05-26T11:17:56.607Z" }, +] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/51/9187be60d989df97f5f0aba133fa54e7300f17616e065d1ada7d7646b6d6/jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d", size = 512900, upload-time = "2023-11-23T09:26:37.44Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/dd/ead9d8ea85bf202d90cc513b533f9c363121c7792674f78e0d8a854b63b4/jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780", size = 15884, upload-time = "2023-11-23T09:26:34.325Z" }, +] + +[[package]] +name = "jupyterlab-server" +version = "2.27.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "babel" }, + { name = "importlib-metadata", 
marker = "python_full_version < '3.10'" }, + { name = "jinja2" }, + { name = "json5" }, + { name = "jsonschema" }, + { name = "jupyter-server" }, + { name = "packaging" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/c9/a883ce65eb27905ce77ace410d83587c82ea64dc85a48d1f7ed52bcfa68d/jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4", size = 76173, upload-time = "2024-07-16T17:02:04.149Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/09/2032e7d15c544a0e3cd831c51d77a8ca57f7555b2e1b2922142eddb02a84/jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4", size = 59700, upload-time = "2024-07-16T17:02:01.115Z" }, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/7d/160595ca88ee87ac6ba95d82177d29ec60aaa63821d3077babb22ce031a5/jupyterlab_widgets-3.0.15.tar.gz", hash = "sha256:2920888a0c2922351a9202817957a68c07d99673504d6cd37345299e971bb08b", size = 213149, upload-time = "2025-05-05T12:32:31.004Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/6a/ca128561b22b60bd5a0c4ea26649e68c8556b82bc70a0c396eebc977fe86/jupyterlab_widgets-3.0.15-py3-none-any.whl", hash = "sha256:d59023d7d7ef71400d51e6fee9a88867f6e65e10a4201605d2d7f3e8f012a31c", size = 216571, upload-time = "2025-05-05T12:32:29.534Z" }, +] + +[[package]] +name = "lark" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/60/bc7622aefb2aee1c0b4ba23c1446d3e30225c8770b38d7aedbfb65ca9d5a/lark-1.2.2.tar.gz", hash = "sha256:ca807d0162cd16cef15a8feecb862d7319e7a09bdb13aef927968e45040fed80", size = 252132, upload-time = "2024-08-13T19:49:00.652Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2d/00/d90b10b962b4277f5e64a78b6609968859ff86889f5b898c1a778c06ec00/lark-1.2.2-py3-none-any.whl", hash = "sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c", size = 111036, upload-time = "2024-08-13T19:48:58.603Z" }, +] + +[[package]] +name = "libcst" +source = { editable = "." } +dependencies = [ + { name = "pyyaml", version = "6.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, + { name = "pyyaml", version = "6.0.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, + { name = "pyyaml-ft", marker = "python_full_version == '3.13.*'" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] + +[package.dev-dependencies] +dev = [ + { name = "black" }, + { name = "build" }, + { name = "coverage", extra = ["toml"] }, + { name = "fixit" }, + { name = "flake8" }, + { name = "hypothesis" }, + { name = "hypothesmith" }, + { name = "maturin" }, + { name = "poethepoet" }, + { name = "prompt-toolkit" }, + { name = "pyre-check", marker = "sys_platform != 'win32'" }, + { name = "setuptools-rust" }, + { name = "setuptools-scm" }, + { name = "slotscheck" }, + { name = "ufmt" }, + { name = "usort" }, +] +docs = [ + { name = "black" }, + { name = "build" }, + { name = "coverage", extra = ["toml"] }, + { name = "fixit" }, + { name = "flake8" }, + { name = "hypothesis" }, + { name = "hypothesmith" }, + { name = "jinja2" }, + { name = "jupyter" }, + { name = "maturin" }, + { name = "nbsphinx" }, + { name = "poethepoet" }, + { name = "prompt-toolkit" }, + { name = "pyre-check", marker = "sys_platform != 'win32'" }, + { name = "setuptools-rust" }, + { name = "setuptools-scm" }, + { name = "slotscheck" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = 
"https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx-rtd-theme" }, + { name = "ufmt" }, + { name = "usort" }, +] + +[package.metadata] +requires-dist = [ + { name = "pyyaml", marker = "python_full_version < '3.13'", specifier = ">=5.2" }, + { name = "pyyaml", marker = "python_full_version >= '3.14'", specifier = ">=6.0.3" }, + { name = "pyyaml-ft", marker = "python_full_version == '3.13.*'", specifier = ">=8.0.0" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "black", specifier = "==25.1.0" }, + { name = "build", specifier = ">=0.10.0" }, + { name = "coverage", extras = ["toml"], specifier = ">=4.5.4" }, + { name = "fixit", specifier = "==2.1.0" }, + { name = "flake8", specifier = "==7.2.0" }, + { name = "hypothesis", specifier = ">=4.36.0" }, + { name = "hypothesmith", specifier = ">=0.0.4" }, + { name = "maturin", specifier = ">=1.7.0,<1.8" }, + { name = "poethepoet", specifier = ">=0.35.0" }, + { name = "prompt-toolkit", specifier = ">=2.0.9" }, + { name = "pyre-check", marker = "sys_platform != 'win32'", specifier = "==0.9.18" }, + { name = "setuptools-rust", specifier = ">=1.5.2" }, + { name = "setuptools-scm", specifier = ">=6.0.1" }, + { name = "slotscheck", specifier = ">=0.7.1" }, + { name = "ufmt", specifier = "==2.8.0" }, + { name = "usort", specifier = "==1.0.8.post1" }, +] +docs = [ + { name = "black", specifier = "==25.1.0" }, + { name = "build", specifier = ">=0.10.0" }, + { name = "coverage", extras = ["toml"], specifier = ">=4.5.4" }, + { name = "fixit", specifier = "==2.1.0" }, + { name = "flake8", specifier = "==7.2.0" }, + { name = "hypothesis", specifier = ">=4.36.0" }, + { name = "hypothesmith", specifier = ">=0.0.4" }, + { name = "jinja2", specifier = "==3.1.6" }, + { name = "jupyter", specifier = ">=1.0.0" }, + { name = "maturin", specifier = ">=1.7.0,<1.8" }, + { name = "nbsphinx", specifier = ">=0.4.2" }, + { name 
= "poethepoet", specifier = ">=0.35.0" }, + { name = "prompt-toolkit", specifier = ">=2.0.9" }, + { name = "pyre-check", marker = "sys_platform != 'win32'", specifier = "==0.9.18" }, + { name = "setuptools-rust", specifier = ">=1.5.2" }, + { name = "setuptools-scm", specifier = ">=6.0.1" }, + { name = "slotscheck", specifier = ">=0.7.1" }, + { name = "sphinx", specifier = ">=5.1.1" }, + { name = "sphinx-rtd-theme", specifier = ">=0.4.3" }, + { name = "ufmt", specifier = "==2.8.0" }, + { name = "usort", specifier = "==1.0.8.post1" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" }, + { url = 
"https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = 
"https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = 
"https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, + { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344, upload-time = "2024-10-18T15:21:43.721Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389, upload-time = "2024-10-18T15:21:44.666Z" }, + { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607, upload-time = "2024-10-18T15:21:45.452Z" }, + { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728, upload-time = "2024-10-18T15:21:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826, upload-time = "2024-10-18T15:21:47.134Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843, upload-time = "2024-10-18T15:21:48.334Z" }, + { url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219, upload-time = "2024-10-18T15:21:49.587Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946, upload-time = "2024-10-18T15:21:50.441Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063, upload-time = "2024-10-18T15:21:51.385Z" }, + { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506, upload-time = "2024-10-18T15:21:52.974Z" }, +] + +[[package]] +name = "marshmallow" +version = "3.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825, upload-time = "2025-02-03T15:32:25.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878, upload-time = "2025-02-03T15:32:22.295Z" }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = 
"sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" }, +] + +[[package]] +name = "maturin" +version = "1.7.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/1e/085ddc0e5b08ae7af7a743a0dd6ed06b22a1332288488f1a333137885150/maturin-1.7.8.tar.gz", hash = "sha256:649c6ef3f0fa4c5f596140d761dc5a4d577c485cc32fb5b9b344a8280352880d", size = 195704, upload-time = "2024-12-04T11:38:23.268Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/ed/c8bb26e91c879e418ae1b01630722ed20b6fe0e6755be8d538d83666f136/maturin-1.7.8-py3-none-linux_armv6l.whl", hash = "sha256:c6950fd2790acd93265e1501cea66f9249cff19724654424ca75a3b17ebb315b", size = 7515691, upload-time = "2024-12-04T11:37:55.443Z" }, + { url = "https://files.pythonhosted.org/packages/38/7a/573f969315f0b92a09a0a565d45e98812c87796e2e19a7856159ab234faf/maturin-1.7.8-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:f98288d5c382bacf0c076871dfd50c38f1eb2248f417551e98dd6f47f6ee8afa", size = 14434454, upload-time = "2024-12-04T11:37:58.448Z" }, + { url = "https://files.pythonhosted.org/packages/a6/17/46834841fbf19231487f185e68b95ca348cc05cce49be8787e0bc7e9dc47/maturin-1.7.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b2d4e0f674ca29864e6b86c2eb9fee8236d1c7496c25f7300e34229272468f4c", size = 7509122, upload-time = "2024-12-04T11:38:01.355Z" }, + { url = 
"https://files.pythonhosted.org/packages/c1/8f/bf8b4871eb390a4baef2e0bb5016852c7c0311a9772e2945534cfa2ee40e/maturin-1.7.8-py3-none-manylinux_2_12_i686.manylinux2010_i686.musllinux_1_1_i686.whl", hash = "sha256:6cafb17bf57822bdc04423d9e3e766d42918d474848fe9833e397267514ba891", size = 7598870, upload-time = "2024-12-04T11:38:03.708Z" }, + { url = "https://files.pythonhosted.org/packages/dc/43/c842be67a7c59568082345249b956138ae93d0b2474fb41c186ce26d05e1/maturin-1.7.8-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.musllinux_1_1_x86_64.whl", hash = "sha256:2b2bdee0c3a84696b3a809054c43ead1a04b7b3321cbd5b8f5676e4ba4691d0f", size = 7932310, upload-time = "2024-12-04T11:38:05.463Z" }, + { url = "https://files.pythonhosted.org/packages/12/12/42435d05f2d6c75eb621751e6f021d29eb34d18e3b9c5c94d828744c2d54/maturin-1.7.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:b8188b71259fc2bc568d9c8acc186fcfed96f42539bcb55b8e6f4ec26e411f37", size = 7321964, upload-time = "2024-12-04T11:38:07.143Z" }, + { url = "https://files.pythonhosted.org/packages/b4/26/f3272ee985ebf9b3e8c4cd4f4efb022af1e12c9f53aed0dcc9a255399f4e/maturin-1.7.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:a4f58c2a53c2958a1bf090960b08b28e676136cd88ac2f5dfdcf1b14ea54ec06", size = 7408613, upload-time = "2024-12-04T11:38:09.814Z" }, + { url = "https://files.pythonhosted.org/packages/36/7d/be27bcc7d3ac6e6c2136a8ec0cc56f227a292d6cfdde55e095b6c0aa24a9/maturin-1.7.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.musllinux_1_1_ppc64le.whl", hash = "sha256:c5d6c0c631d1fc646cd3834795e6cfd72ab4271d289df7e0f911261a02bec75f", size = 9496974, upload-time = "2024-12-04T11:38:11.618Z" }, + { url = "https://files.pythonhosted.org/packages/e1/e8/0d7323e9a31c11edf69c4473d73eca74803ce3e2390abf8ae3ac7eb10b04/maturin-1.7.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c23664d19dadcbf800ef70f26afb2e0485a985c62889930934f019c565534c23", size = 10828401, upload-time = "2024-12-04T11:38:14.42Z" }, + { url = "https://files.pythonhosted.org/packages/7e/82/5080e052c0d8c9872f6d4b94cae84c17ed7f2ea270d709210ea6445b655f/maturin-1.7.8-py3-none-win32.whl", hash = "sha256:403eebf1afa6f19b49425f089e39c53b8e597bc86a47f3a76e828dc78d27fa80", size = 6845240, upload-time = "2024-12-04T11:38:17.162Z" }, + { url = "https://files.pythonhosted.org/packages/6d/c9/9b162361ded893f36038c2f8ac6a972ec441c11df8d17c440997eb28090f/maturin-1.7.8-py3-none-win_amd64.whl", hash = "sha256:1ce48d007438b895f8665314b6748ac0dab31e4f32049a60b52281dd2dccbdde", size = 7762332, upload-time = "2024-12-04T11:38:19.445Z" }, + { url = "https://files.pythonhosted.org/packages/fa/40/46d4742db742f69a7fe0054cd7c82bc79b2d70cb8c91f7e737e75c28a5f3/maturin-1.7.8-py3-none-win_arm64.whl", hash = "sha256:cc92a62953205e8945b6cfe6943d6a8576a4442d30d9c67141f944f4f4640e62", size = 6501353, upload-time = "2024-12-04T11:38:21.713Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mistune" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/c4/79/bda47f7dd7c3c55770478d6d02c9960c430b0cf1773b72366ff89126ea31/mistune-3.1.3.tar.gz", hash = "sha256:a7035c21782b2becb6be62f8f25d3df81ccb4d6fa477a6525b15af06539f02a0", size = 94347, upload-time = "2025-03-19T14:27:24.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/4d/23c4e4f09da849e127e9f123241946c23c1e30f45a88366879e064211815/mistune-3.1.3-py3-none-any.whl", hash = "sha256:1a32314113cff28aa6432e99e522677c8587fd83e3d51c29b82a52409c842bd9", size = 53410, upload-time = "2025-03-19T14:27:23.451Z" }, +] + +[[package]] +name = "moreorless" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8d/85/2e4999ac4a21ab3c5f31e2a48e0989a80be3afc512a7983e3253615983d4/moreorless-0.5.0.tar.gz", hash = "sha256:560a04f85006fccd74feaa4b6213a446392ff7b5ec0194a5464b6c30f182fa33", size = 14093, upload-time = "2025-05-04T22:29:59.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/2e/9ea80ca55b73530b7639c6f146a58f636ddfe5a852ad467a44fe3e80d809/moreorless-0.5.0-py3-none-any.whl", hash = "sha256:66228870cd2f14bad5c3c3780aa71e29d3b2d9b5a01c03bfbf105efd4f668ecf", size = 14380, upload-time = "2025-05-04T22:29:57.417Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nbclient" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "nbformat" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/87/66/7ffd18d58eae90d5721f9f39212327695b749e23ad44b3881744eaf4d9e8/nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193", size = 62424, upload-time = "2024-12-19T10:32:27.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/6d/e7fa07f03a4a7b221d94b4d586edb754a9b0dc3c9e2c93353e9fa4e0d117/nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d", size = 25434, upload-time = "2024-12-19T10:32:24.139Z" }, +] + +[[package]] +name = "nbconvert" +version = "7.16.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "bleach", extra = ["css"] }, + { name = "defusedxml" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyterlab-pygments" }, + { name = "markupsafe" }, + { name = "mistune" }, + { name = "nbclient" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "pandocfilters" }, + { name = "pygments" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/59/f28e15fc47ffb73af68a8d9b47367a8630d76e97ae85ad18271b9db96fdf/nbconvert-7.16.6.tar.gz", hash = "sha256:576a7e37c6480da7b8465eefa66c17844243816ce1ccc372633c6b71c3c0f582", size = 857715, upload-time = 
"2025-01-28T09:29:14.724Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/9a/cd673b2f773a12c992f41309ef81b99da1690426bd2f96957a7ade0d3ed7/nbconvert-7.16.6-py3-none-any.whl", hash = "sha256:1375a7b67e0c2883678c48e506dc320febb57685e5ee67faa51b18a90f3a712b", size = 258525, upload-time = "2025-01-28T09:29:12.551Z" }, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fastjsonschema" }, + { name = "jsonschema" }, + { name = "jupyter-core" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" }, +] + +[[package]] +name = "nbsphinx" +version = "0.9.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "jinja2" }, + { name = "nbconvert" }, + { name = "nbformat" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/84/b1856b7651ac34e965aa567a158714c7f3bd42a1b1ce76bf423ffb99872c/nbsphinx-0.9.7.tar.gz", hash = "sha256:abd298a686d55fa894ef697c51d44f24e53aa312dadae38e82920f250a5456fe", size = 180479, upload-time = "2025-03-03T19:46:08.069Z" } +wheels = [ + { 
url = "https://files.pythonhosted.org/packages/49/2d/8c8e635bcc6757573d311bb3c5445426382f280da32b8cd6d82d501ef4a4/nbsphinx-0.9.7-py3-none-any.whl", hash = "sha256:7292c3767fea29e405c60743eee5393682a83982ab202ff98f5eb2db02629da8", size = 31660, upload-time = "2025-03-03T19:46:06.581Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "notebook" +version = "7.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, + { name = "jupyterlab" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/21/4f83b15e483da4f4f63928edd0cb08b6e7d33f8a15c23b116a90c44c6235/notebook-7.4.3.tar.gz", hash = "sha256:a1567481cd3853f2610ee0ecf5dfa12bb508e878ee8f92152c134ef7f0568a76", size = 13881668, upload-time = "2025-05-26T14:27:21.656Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/1b/16c809d799e3ddd7a97c8b43734f79624b74ddef9707e7d92275a13777bc/notebook-7.4.3-py3-none-any.whl", hash = "sha256:9cdeee954e04101cadb195d90e2ab62b7c9286c1d4f858bf3bb54e40df16c0c3", size = 14286402, upload-time = "2025-05-26T14:27:17.339Z" }, +] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ 
+ { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/d2/92fa3243712b9a3e8bafaf60aac366da1cada3639ca767ff4b5b3654ec28/notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb", size = 13167, upload-time = "2024-02-14T23:35:18.353Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/33/bd5b9137445ea4b680023eb0469b2bb969d61303dedb2aac6560ff3d14a1/notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef", size = 13307, upload-time = "2024-02-14T23:35:16.286Z" }, +] + +[[package]] +name = "overrides" +version = "7.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/36/86/b585f53236dec60aba864e050778b25045f857e17f6e5ea0ae95fe80edd2/overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a", size = 22812, upload-time = "2024-01-27T21:01:33.423Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/ab/fc8290c6a4c722e5514d80f62b2dc4c4df1a68a41d1364e625c35990fcf3/overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49", size = 17832, upload-time = "2024-01-27T21:01:31.393Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, 
upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pandocfilters" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/70/6f/3dd4940bbe001c06a65f88e36bad298bc7a0de5036115639926b0c5c0458/pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e", size = 8454, upload-time = "2024-01-18T20:08:13.726Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/af/4fbc8cab944db5d21b7e2a5b8e9211a03a79852b1157e2c102fcc61ac440/pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc", size = 8663, upload-time = "2024-01-18T20:08:11.28Z" }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609, upload-time = "2024-04-05T09:43:55.897Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650, upload-time = "2024-04-05T09:43:53.299Z" }, +] + +[[package]] +name = "pastel" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/f1/4594f5e0fcddb6953e5b8fe00da8c317b8b41b547e2b3ae2da7512943c62/pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d", size = 7555, upload-time = "2020-09-16T19:21:12.43Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/aa/18/a8444036c6dd65ba3624c63b734d3ba95ba63ace513078e1580590075d21/pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364", size = 5955, upload-time = "2020-09-16T19:21:11.409Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = 
"sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" }, +] + +[[package]] +name = "poethepoet" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pastel" }, + { name = "pyyaml", version = "6.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.14'" }, + { name = "pyyaml", version = "6.0.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.14'" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/b1/d4f4361b278fae10f6074675385ce3acf53c647f8e6eeba22c652f8ba985/poethepoet-0.35.0.tar.gz", hash = "sha256:b396ae862d7626e680bbd0985b423acf71634ce93a32d8b5f38340f44f5fbc3e", size = 66006, upload-time = "2025-06-09T12:58:18.849Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/08/abc2d7e2400dd8906e3208f9b88ac610f097d7ee0c7a1fa4a157b49a9e86/poethepoet-0.35.0-py3-none-any.whl", hash = "sha256:bed5ae1fd63f179dfa67aabb93fa253d79695c69667c927d8b24ff378799ea75", size = 87164, upload-time = "2025-06-09T12:58:17.084Z" }, +] + +[[package]] +name = "prometheus-client" +version = "0.22.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/cf/40dde0a2be27cc1eb41e333d1a674a74ce8b8b0457269cc640fd42b07cf7/prometheus_client-0.22.1.tar.gz", hash = "sha256:190f1331e783cf21eb60bca559354e0a4d4378facecf78f5428c39b675d20d28", size = 69746, upload-time = "2025-06-02T14:29:01.152Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/32/ae/ec06af4fe3ee72d16973474f122541746196aaa16cea6f66d18b963c6177/prometheus_client-0.22.1-py3-none-any.whl", hash = "sha256:cca895342e308174341b2cbf99a56bef291fbc0ef7b9e5412a0f26d653ba7094", size = 58694, upload-time = "2025-06-02T14:29:00.068Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +] + +[[package]] +name = "pycodestyle" +version = "2.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/6e/1f4a62078e4d95d82367f24e685aef3a672abfd27d1a868068fed4ed2254/pycodestyle-2.13.0.tar.gz", hash = "sha256:c8415bf09abe81d9c7f872502a6eee881fbe85d8763dd5b9924bb0a01d67efae", size = 39312, upload-time = "2025-03-29T17:33:30.669Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/be/b00116df1bfb3e0bb5b45e29d604799f7b91dd861637e4d448b4e09e6a3e/pycodestyle-2.13.0-py2.py3-none-any.whl", hash = "sha256:35863c5974a271c7a726ed228a14a4f6daf49df369d8c50cd9a6f58a5e143ba9", size = 31424, upload-time = "2025-03-29T17:33:29.405Z" }, 
+] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pyflakes" +version = "3.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/cc/1df338bd7ed1fa7c317081dcf29bf2f01266603b301e6858856d346a12b3/pyflakes-3.3.2.tar.gz", hash = "sha256:6dfd61d87b97fba5dcfaaf781171ac16be16453be6d816147989e7f6e6a9576b", size = 64175, upload-time = "2025-03-31T13:21:20.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/40/b293a4fa769f3b02ab9e387c707c4cbdc34f073f945de0386107d4e669e6/pyflakes-3.3.2-py2.py3-none-any.whl", hash = "sha256:5039c8339cbb1944045f4ee5466908906180f13cc99cc9949348d10f82a5c32a", size = 63164, upload-time = "2025-03-31T13:21:18.503Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = 
"sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, +] + +[[package]] +name = "pyproject-hooks" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/82/28175b2414effca1cdac8dc99f76d660e7a4fb0ceefa4b4ab8f5f6742925/pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8", size = 19228, upload-time = "2024-09-29T09:24:13.293Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/24/12818598c362d7f300f18e74db45963dbcb85150324092410c8b49405e42/pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913", size = 10216, upload-time = "2024-09-29T09:24:11.978Z" }, +] + +[[package]] +name = "pyre-check" +version = "0.9.18" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "dataclasses-json" }, + { name = "intervaltree" }, + { name = "libcst" }, + { name = "psutil" }, + { name = "pyre-extensions" }, + { name = "tabulate" }, + { name = "testslide" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/02/a92e10ecddce435f794493e18e1c0add477e3c307023525a49cffa299163/pyre-check-0.9.18.tar.gz", hash = "sha256:d5eb6db9011a7207189ecd0eaf32951e46cb0769c0f96a78fd0b90e633c9df2c", size = 18030825, upload-time = "2023-02-14T00:59:29.593Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/d9/5207ccd2eff3870b44f11c2db6b6d8e31cbcaca973a1b5ba4ac6d2460e41/pyre_check-0.9.18-py3-none-macosx_10_11_x86_64.whl", hash = 
"sha256:22633f5af3b986d266451a9e386a32414f8868de0a94226c7766f81eb080c59d", size = 19378418, upload-time = "2023-02-14T00:59:24.891Z" }, + { url = "https://files.pythonhosted.org/packages/33/07/865a1ca2a57fc2e9a0f78e005938a465b8a2ff748538fb5a0c1c19cb661f/pyre_check-0.9.18-py3-none-manylinux1_x86_64.whl", hash = "sha256:5659d4dbd6d1dd3052359861d828419f07d1ced1dad4ce4ca79071d252699c26", size = 23486523, upload-time = "2023-02-14T00:59:21.022Z" }, +] + +[[package]] +name = "pyre-extensions" +version = "0.0.32" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/53/5bc2532536e921c48366ad1047c1344ccef6afa5e84053f0f6e20a453767/pyre_extensions-0.0.32.tar.gz", hash = "sha256:5396715f14ea56c4d5fd0a88c57ca7e44faa468f905909edd7de4ad90ed85e55", size = 10852, upload-time = "2024-11-22T19:26:44.152Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/7a/9812cb8be9828ab688203c5ac5f743c60652887f0c00995a6f6f19f912bd/pyre_extensions-0.0.32-py3-none-any.whl", hash = "sha256:a63ba6883ab02f4b1a9f372ed4eb4a2f4c6f3d74879aa2725186fdfcfe3e5c68", size = 12766, upload-time = "2024-11-22T19:26:42.465Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, 
upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-json-logger" +version = "3.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/de/d3144a0bceede957f961e975f3752760fbe390d57fbe194baf709d8f1f7b/python_json_logger-3.3.0.tar.gz", hash = "sha256:12b7e74b17775e7d565129296105bbe3910842d9d0eb083fc83a6a617aa8df84", size = 16642, upload-time = "2025-03-07T07:08:27.301Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/20/0f2523b9e50a8052bc6a8b732dfc8568abbdc42010aef03a2d750bdab3b2/python_json_logger-3.3.0-py3-none-any.whl", hash = "sha256:dd980fae8cffb24c13caf6e158d3d61c0d6d22342f932cb6e9deedab3d35eec7", size = 15163, upload-time = "2025-03-07T07:08:25.627Z" }, +] + +[[package]] +name = "pywin32" +version = "310" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/da/a5f38fffbba2fb99aa4aa905480ac4b8e83ca486659ac8c95bce47fb5276/pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1", size = 8848240, upload-time = "2025-03-17T00:55:46.783Z" }, + { url = "https://files.pythonhosted.org/packages/aa/fe/d873a773324fa565619ba555a82c9dabd677301720f3660a731a5d07e49a/pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d", size = 9601854, upload-time = "2025-03-17T00:55:48.783Z" }, + { url = "https://files.pythonhosted.org/packages/3c/84/1a8e3d7a15490d28a5d816efa229ecb4999cdc51a7c30dd8914f669093b8/pywin32-310-cp310-cp310-win_arm64.whl", hash = "sha256:33babed0cf0c92a6f94cc6cc13546ab24ee13e3e800e61ed87609ab91e4c8213", size = 8522963, upload-time = "2025-03-17T00:55:50.969Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/b1/68aa2986129fb1011dabbe95f0136f44509afaf072b12b8f815905a39f33/pywin32-310-cp311-cp311-win32.whl", hash = "sha256:1e765f9564e83011a63321bb9d27ec456a0ed90d3732c4b2e312b855365ed8bd", size = 8784284, upload-time = "2025-03-17T00:55:53.124Z" }, + { url = "https://files.pythonhosted.org/packages/b3/bd/d1592635992dd8db5bb8ace0551bc3a769de1ac8850200cfa517e72739fb/pywin32-310-cp311-cp311-win_amd64.whl", hash = "sha256:126298077a9d7c95c53823934f000599f66ec9296b09167810eb24875f32689c", size = 9520748, upload-time = "2025-03-17T00:55:55.203Z" }, + { url = "https://files.pythonhosted.org/packages/90/b1/ac8b1ffce6603849eb45a91cf126c0fa5431f186c2e768bf56889c46f51c/pywin32-310-cp311-cp311-win_arm64.whl", hash = "sha256:19ec5fc9b1d51c4350be7bb00760ffce46e6c95eaf2f0b2f1150657b1a43c582", size = 8455941, upload-time = "2025-03-17T00:55:57.048Z" }, + { url = "https://files.pythonhosted.org/packages/6b/ec/4fdbe47932f671d6e348474ea35ed94227fb5df56a7c30cbbb42cd396ed0/pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d", size = 8796239, upload-time = "2025-03-17T00:55:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/e3/e5/b0627f8bb84e06991bea89ad8153a9e50ace40b2e1195d68e9dff6b03d0f/pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060", size = 9503839, upload-time = "2025-03-17T00:56:00.8Z" }, + { url = "https://files.pythonhosted.org/packages/1f/32/9ccf53748df72301a89713936645a664ec001abd35ecc8578beda593d37d/pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966", size = 8459470, upload-time = "2025-03-17T00:56:02.601Z" }, + { url = "https://files.pythonhosted.org/packages/1c/09/9c1b978ffc4ae53999e89c19c77ba882d9fce476729f23ef55211ea1c034/pywin32-310-cp313-cp313-win32.whl", hash = 
"sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab", size = 8794384, upload-time = "2025-03-17T00:56:04.383Z" }, + { url = "https://files.pythonhosted.org/packages/45/3c/b4640f740ffebadd5d34df35fecba0e1cfef8fde9f3e594df91c28ad9b50/pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e", size = 9503039, upload-time = "2025-03-17T00:56:06.207Z" }, + { url = "https://files.pythonhosted.org/packages/b4/f4/f785020090fb050e7fb6d34b780f2231f302609dc964672f72bfaeb59a28/pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33", size = 8458152, upload-time = "2025-03-17T00:56:07.819Z" }, + { url = "https://files.pythonhosted.org/packages/a2/cd/d09d434630edb6a0c44ad5079611279a67530296cfe0451e003de7f449ff/pywin32-310-cp39-cp39-win32.whl", hash = "sha256:851c8d927af0d879221e616ae1f66145253537bbdd321a77e8ef701b443a9a1a", size = 8848099, upload-time = "2025-03-17T00:55:42.415Z" }, + { url = "https://files.pythonhosted.org/packages/93/ff/2a8c10315ffbdee7b3883ac0d1667e267ca8b3f6f640d81d43b87a82c0c7/pywin32-310-cp39-cp39-win_amd64.whl", hash = "sha256:96867217335559ac619f00ad70e513c0fcf84b8a3af9fc2bba3b59b97da70475", size = 9602031, upload-time = "2025-03-17T00:55:44.512Z" }, +] + +[[package]] +name = "pywinpty" +version = "2.0.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/7c/917f9c4681bb8d34bfbe0b79d36bbcd902651aeab48790df3d30ba0202fb/pywinpty-2.0.15.tar.gz", hash = "sha256:312cf39153a8736c617d45ce8b6ad6cd2107de121df91c455b10ce6bba7a39b2", size = 29017, upload-time = "2025-02-03T21:53:23.265Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a6/b7/855db919ae526d2628f3f2e6c281c4cdff7a9a8af51bb84659a9f07b1861/pywinpty-2.0.15-cp310-cp310-win_amd64.whl", hash = "sha256:8e7f5de756a615a38b96cd86fa3cd65f901ce54ce147a3179c45907fa11b4c4e", size = 1405161, 
upload-time = "2025-02-03T21:56:25.008Z" }, + { url = "https://files.pythonhosted.org/packages/5e/ac/6884dcb7108af66ad53f73ef4dad096e768c9203a6e6ce5e6b0c4a46e238/pywinpty-2.0.15-cp311-cp311-win_amd64.whl", hash = "sha256:9a6bcec2df2707aaa9d08b86071970ee32c5026e10bcc3cc5f6f391d85baf7ca", size = 1405249, upload-time = "2025-02-03T21:55:47.114Z" }, + { url = "https://files.pythonhosted.org/packages/88/e5/9714def18c3a411809771a3fbcec70bffa764b9675afb00048a620fca604/pywinpty-2.0.15-cp312-cp312-win_amd64.whl", hash = "sha256:83a8f20b430bbc5d8957249f875341a60219a4e971580f2ba694fbfb54a45ebc", size = 1405243, upload-time = "2025-02-03T21:56:52.476Z" }, + { url = "https://files.pythonhosted.org/packages/fb/16/2ab7b3b7f55f3c6929e5f629e1a68362981e4e5fed592a2ed1cb4b4914a5/pywinpty-2.0.15-cp313-cp313-win_amd64.whl", hash = "sha256:ab5920877dd632c124b4ed17bc6dd6ef3b9f86cd492b963ffdb1a67b85b0f408", size = 1405020, upload-time = "2025-02-03T21:56:04.753Z" }, + { url = "https://files.pythonhosted.org/packages/7c/16/edef3515dd2030db2795dbfbe392232c7a0f3dc41b98e92b38b42ba497c7/pywinpty-2.0.15-cp313-cp313t-win_amd64.whl", hash = "sha256:a4560ad8c01e537708d2790dbe7da7d986791de805d89dd0d3697ca59e9e4901", size = 1404151, upload-time = "2025-02-03T21:55:53.628Z" }, + { url = "https://files.pythonhosted.org/packages/47/96/90fa02f19b1eff7469ad7bf0ef8efca248025de9f1d0a0b25682d2aacf68/pywinpty-2.0.15-cp39-cp39-win_amd64.whl", hash = "sha256:d261cd88fcd358cfb48a7ca0700db3e1c088c9c10403c9ebc0d8a8b57aa6a117", size = 1405302, upload-time = "2025-02-03T21:55:40.394Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.13.*'", + "python_full_version >= '3.11' and python_full_version < '3.13'", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] +sdist = { url = 
"https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" }, + { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" }, + { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" }, + { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" }, + { url = 
"https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" }, + { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" }, + { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" }, + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" }, + { url = 
"https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, upload-time = "2024-08-06T20:32:18.555Z" }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" }, + { url = 
"https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, + { url = "https://files.pythonhosted.org/packages/65/d8/b7a1db13636d7fb7d4ff431593c510c8b8fca920ade06ca8ef20015493c5/PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", size = 184777, upload-time = "2024-08-06T20:33:25.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/02/6ec546cd45143fdf9840b2c6be8d875116a64076218b61d68e12548e5839/PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", size = 172318, upload-time = "2024-08-06T20:33:27.212Z" }, + { url = "https://files.pythonhosted.org/packages/0e/9a/8cc68be846c972bda34f6c2a93abb644fb2476f4dcc924d52175786932c9/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", size = 720891, upload-time = "2024-08-06T20:33:28.974Z" }, + { url = "https://files.pythonhosted.org/packages/e9/6c/6e1b7f40181bc4805e2e07f4abc10a88ce4648e7e95ff1abe4ae4014a9b2/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", size = 722614, upload-time = "2024-08-06T20:33:34.157Z" }, + { url = "https://files.pythonhosted.org/packages/3d/32/e7bd8535d22ea2874cef6a81021ba019474ace0d13a4819c2a4bce79bd6a/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", size = 737360, upload-time = "2024-08-06T20:33:35.84Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/12/7322c1e30b9be969670b672573d45479edef72c9a0deac3bb2868f5d7469/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", size = 699006, upload-time = "2024-08-06T20:33:37.501Z" }, + { url = "https://files.pythonhosted.org/packages/82/72/04fcad41ca56491995076630c3ec1e834be241664c0c09a64c9a2589b507/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", size = 723577, upload-time = "2024-08-06T20:33:39.389Z" }, + { url = "https://files.pythonhosted.org/packages/ed/5e/46168b1f2757f1fcd442bc3029cd8767d88a98c9c05770d8b420948743bb/PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", size = 144593, upload-time = "2024-08-06T20:33:46.63Z" }, + { url = "https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312, upload-time = "2024-08-06T20:33:49.073Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14'", +] +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/a0/39350dd17dd6d6c6507025c0e53aef67a9293a6d37d3511f23ea510d5800/pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b", size = 184227, upload-time = "2025-09-25T21:31:46.04Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/14/52d505b5c59ce73244f59c7a50ecf47093ce4765f116cdb98286a71eeca2/pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956", size = 174019, upload-time = "2025-09-25T21:31:47.706Z" }, + { url = "https://files.pythonhosted.org/packages/43/f7/0e6a5ae5599c838c696adb4e6330a59f463265bfa1e116cfd1fbb0abaaae/pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8", size = 740646, upload-time = "2025-09-25T21:31:49.21Z" }, + { url = "https://files.pythonhosted.org/packages/2f/3a/61b9db1d28f00f8fd0ae760459a5c4bf1b941baf714e207b6eb0657d2578/pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198", size = 840793, upload-time = "2025-09-25T21:31:50.735Z" }, + { url = "https://files.pythonhosted.org/packages/7a/1e/7acc4f0e74c4b3d9531e24739e0ab832a5edf40e64fbae1a9c01941cabd7/pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b", size = 770293, upload-time = "2025-09-25T21:31:51.828Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ef/abd085f06853af0cd59fa5f913d61a8eab65d7639ff2a658d18a25d6a89d/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0", size = 732872, upload-time = "2025-09-25T21:31:53.282Z" }, + { url = "https://files.pythonhosted.org/packages/1f/15/2bc9c8faf6450a8b3c9fc5448ed869c599c0a74ba2669772b1f3a0040180/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69", size = 758828, upload-time = "2025-09-25T21:31:54.807Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/00/531e92e88c00f4333ce359e50c19b8d1de9fe8d581b1534e35ccfbc5f393/pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e", size = 142415, upload-time = "2025-09-25T21:31:55.885Z" }, + { url = "https://files.pythonhosted.org/packages/2a/fa/926c003379b19fca39dd4634818b00dec6c62d87faf628d1394e137354d4/pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c", size = 158561, upload-time = "2025-09-25T21:31:57.406Z" }, + { url = "https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" }, + { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" }, + { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" }, + { url = "https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" }, + { url = "https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" }, + { url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" }, + { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" }, + { url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" }, + { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, + { url = "https://files.pythonhosted.org/packages/9f/62/67fc8e68a75f738c9200422bf65693fb79a4cd0dc5b23310e5202e978090/pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da", size = 184450, upload-time = "2025-09-25T21:33:00.618Z" }, + { url = "https://files.pythonhosted.org/packages/ae/92/861f152ce87c452b11b9d0977952259aa7df792d71c1053365cc7b09cc08/pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917", size = 174319, upload-time = "2025-09-25T21:33:02.086Z" }, + { url = "https://files.pythonhosted.org/packages/d0/cd/f0cfc8c74f8a030017a2b9c771b7f47e5dd702c3e28e5b2071374bda2948/pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9", size = 737631, upload-time = "2025-09-25T21:33:03.25Z" }, + { url = "https://files.pythonhosted.org/packages/ef/b2/18f2bd28cd2055a79a46c9b0895c0b3d987ce40ee471cecf58a1a0199805/pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5", size = 836795, upload-time = "2025-09-25T21:33:05.014Z" }, + { url = "https://files.pythonhosted.org/packages/73/b9/793686b2d54b531203c160ef12bec60228a0109c79bae6c1277961026770/pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a", size = 750767, upload-time = "2025-09-25T21:33:06.398Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/86/a137b39a611def2ed78b0e66ce2fe13ee701a07c07aebe55c340ed2a050e/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926", size = 727982, upload-time = "2025-09-25T21:33:08.708Z" }, + { url = "https://files.pythonhosted.org/packages/dd/62/71c27c94f457cf4418ef8ccc71735324c549f7e3ea9d34aba50874563561/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7", size = 755677, upload-time = "2025-09-25T21:33:09.876Z" }, + { url = "https://files.pythonhosted.org/packages/29/3d/6f5e0d58bd924fb0d06c3a6bad00effbdae2de5adb5cda5648006ffbd8d3/pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0", size = 142592, upload-time = "2025-09-25T21:33:10.983Z" }, + { url = "https://files.pythonhosted.org/packages/f0/0c/25113e0b5e103d7f1490c0e947e303fe4a696c10b501dea7a9f49d4e876c/pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007", size = 158777, upload-time = "2025-09-25T21:33:15.55Z" }, +] + +[[package]] +name = "pyyaml-ft" +version = "8.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/eb/5a0d575de784f9a1f94e2b1288c6886f13f34185e13117ed530f32b6f8a8/pyyaml_ft-8.0.0.tar.gz", hash = "sha256:0c947dce03954c7b5d38869ed4878b2e6ff1d44b08a0d84dc83fdad205ae39ab", size = 141057, upload-time = "2025-06-10T15:32:15.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/ba/a067369fe61a2e57fb38732562927d5bae088c73cb9bb5438736a9555b29/pyyaml_ft-8.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8c1306282bc958bfda31237f900eb52c9bedf9b93a11f82e1aab004c9a5657a6", size = 187027, upload-time = "2025-06-10T15:31:48.722Z" }, + { url = 
"https://files.pythonhosted.org/packages/ad/c5/a3d2020ce5ccfc6aede0d45bcb870298652ac0cf199f67714d250e0cdf39/pyyaml_ft-8.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:30c5f1751625786c19de751e3130fc345ebcba6a86f6bddd6e1285342f4bbb69", size = 176146, upload-time = "2025-06-10T15:31:50.584Z" }, + { url = "https://files.pythonhosted.org/packages/e3/bb/23a9739291086ca0d3189eac7cd92b4d00e9fdc77d722ab610c35f9a82ba/pyyaml_ft-8.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fa992481155ddda2e303fcc74c79c05eddcdbc907b888d3d9ce3ff3e2adcfb0", size = 746792, upload-time = "2025-06-10T15:31:52.304Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c2/e8825f4ff725b7e560d62a3609e31d735318068e1079539ebfde397ea03e/pyyaml_ft-8.0.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cec6c92b4207004b62dfad1f0be321c9f04725e0f271c16247d8b39c3bf3ea42", size = 786772, upload-time = "2025-06-10T15:31:54.712Z" }, + { url = "https://files.pythonhosted.org/packages/35/be/58a4dcae8854f2fdca9b28d9495298fd5571a50d8430b1c3033ec95d2d0e/pyyaml_ft-8.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06237267dbcab70d4c0e9436d8f719f04a51123f0ca2694c00dd4b68c338e40b", size = 778723, upload-time = "2025-06-10T15:31:56.093Z" }, + { url = "https://files.pythonhosted.org/packages/86/ed/fed0da92b5d5d7340a082e3802d84c6dc9d5fa142954404c41a544c1cb92/pyyaml_ft-8.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8a7f332bc565817644cdb38ffe4739e44c3e18c55793f75dddb87630f03fc254", size = 758478, upload-time = "2025-06-10T15:31:58.314Z" }, + { url = "https://files.pythonhosted.org/packages/f0/69/ac02afe286275980ecb2dcdc0156617389b7e0c0a3fcdedf155c67be2b80/pyyaml_ft-8.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7d10175a746be65f6feb86224df5d6bc5c049ebf52b89a88cf1cd78af5a367a8", size = 799159, upload-time = "2025-06-10T15:31:59.675Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/ac/c492a9da2e39abdff4c3094ec54acac9747743f36428281fb186a03fab76/pyyaml_ft-8.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:58e1015098cf8d8aec82f360789c16283b88ca670fe4275ef6c48c5e30b22a96", size = 158779, upload-time = "2025-06-10T15:32:01.029Z" }, + { url = "https://files.pythonhosted.org/packages/5d/9b/41998df3298960d7c67653669f37710fa2d568a5fc933ea24a6df60acaf6/pyyaml_ft-8.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e64fa5f3e2ceb790d50602b2fd4ec37abbd760a8c778e46354df647e7c5a4ebb", size = 191331, upload-time = "2025-06-10T15:32:02.602Z" }, + { url = "https://files.pythonhosted.org/packages/0f/16/2710c252ee04cbd74d9562ebba709e5a284faeb8ada88fcda548c9191b47/pyyaml_ft-8.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8d445bf6ea16bb93c37b42fdacfb2f94c8e92a79ba9e12768c96ecde867046d1", size = 182879, upload-time = "2025-06-10T15:32:04.466Z" }, + { url = "https://files.pythonhosted.org/packages/9a/40/ae8163519d937fa7bfa457b6f78439cc6831a7c2b170e4f612f7eda71815/pyyaml_ft-8.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c56bb46b4fda34cbb92a9446a841da3982cdde6ea13de3fbd80db7eeeab8b49", size = 811277, upload-time = "2025-06-10T15:32:06.214Z" }, + { url = "https://files.pythonhosted.org/packages/f9/66/28d82dbff7f87b96f0eeac79b7d972a96b4980c1e445eb6a857ba91eda00/pyyaml_ft-8.0.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dab0abb46eb1780da486f022dce034b952c8ae40753627b27a626d803926483b", size = 831650, upload-time = "2025-06-10T15:32:08.076Z" }, + { url = "https://files.pythonhosted.org/packages/e8/df/161c4566facac7d75a9e182295c223060373d4116dead9cc53a265de60b9/pyyaml_ft-8.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd48d639cab5ca50ad957b6dd632c7dd3ac02a1abe0e8196a3c24a52f5db3f7a", size = 815755, upload-time = "2025-06-10T15:32:09.435Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/10/f42c48fa5153204f42eaa945e8d1fd7c10d6296841dcb2447bf7da1be5c4/pyyaml_ft-8.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:052561b89d5b2a8e1289f326d060e794c21fa068aa11255fe71d65baf18a632e", size = 810403, upload-time = "2025-06-10T15:32:11.051Z" }, + { url = "https://files.pythonhosted.org/packages/d5/d2/e369064aa51009eb9245399fd8ad2c562bd0bcd392a00be44b2a824ded7c/pyyaml_ft-8.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3bb4b927929b0cb162fb1605392a321e3333e48ce616cdcfa04a839271373255", size = 835581, upload-time = "2025-06-10T15:32:12.897Z" }, + { url = "https://files.pythonhosted.org/packages/c0/28/26534bed77109632a956977f60d8519049f545abc39215d086e33a61f1f2/pyyaml_ft-8.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:de04cfe9439565e32f178106c51dd6ca61afaa2907d143835d501d84703d3793", size = 171579, upload-time = "2025-06-10T15:32:14.34Z" }, +] + +[[package]] +name = "pyzmq" +version = "26.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/11/b9213d25230ac18a71b39b3723494e57adebe36e066397b961657b3b41c1/pyzmq-26.4.0.tar.gz", hash = "sha256:4bd13f85f80962f91a651a7356fe0472791a5f7a92f227822b5acf44795c626d", size = 278293, upload-time = "2025-04-04T12:05:44.049Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/b8/af1d814ffc3ff9730f9a970cbf216b6f078e5d251a25ef5201d7bc32a37c/pyzmq-26.4.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:0329bdf83e170ac133f44a233fc651f6ed66ef8e66693b5af7d54f45d1ef5918", size = 1339238, upload-time = "2025-04-04T12:03:07.022Z" }, + { url = "https://files.pythonhosted.org/packages/ee/e4/5aafed4886c264f2ea6064601ad39c5fc4e9b6539c6ebe598a859832eeee/pyzmq-26.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:398a825d2dea96227cf6460ce0a174cf7657d6f6827807d4d1ae9d0f9ae64315", size = 672848, upload-time = "2025-04-04T12:03:08.591Z" }, + { url = "https://files.pythonhosted.org/packages/79/39/026bf49c721cb42f1ef3ae0ee3d348212a7621d2adb739ba97599b6e4d50/pyzmq-26.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d52d62edc96787f5c1dfa6c6ccff9b581cfae5a70d94ec4c8da157656c73b5b", size = 911299, upload-time = "2025-04-04T12:03:10Z" }, + { url = "https://files.pythonhosted.org/packages/03/23/b41f936a9403b8f92325c823c0f264c6102a0687a99c820f1aaeb99c1def/pyzmq-26.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1410c3a3705db68d11eb2424d75894d41cff2f64d948ffe245dd97a9debfebf4", size = 867920, upload-time = "2025-04-04T12:03:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/c1/3e/2de5928cdadc2105e7c8f890cc5f404136b41ce5b6eae5902167f1d5641c/pyzmq-26.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7dacb06a9c83b007cc01e8e5277f94c95c453c5851aac5e83efe93e72226353f", size = 862514, upload-time = "2025-04-04T12:03:13.013Z" }, + { url = "https://files.pythonhosted.org/packages/ce/57/109569514dd32e05a61d4382bc88980c95bfd2f02e58fea47ec0ccd96de1/pyzmq-26.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6bab961c8c9b3a4dc94d26e9b2cdf84de9918931d01d6ff38c721a83ab3c0ef5", size = 1204494, upload-time = "2025-04-04T12:03:14.795Z" }, + { url = "https://files.pythonhosted.org/packages/aa/02/dc51068ff2ca70350d1151833643a598625feac7b632372d229ceb4de3e1/pyzmq-26.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7a5c09413b924d96af2aa8b57e76b9b0058284d60e2fc3730ce0f979031d162a", size = 1514525, upload-time = "2025-04-04T12:03:16.246Z" }, + { url = "https://files.pythonhosted.org/packages/48/2a/a7d81873fff0645eb60afaec2b7c78a85a377af8f1d911aff045d8955bc7/pyzmq-26.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7d489ac234d38e57f458fdbd12a996bfe990ac028feaf6f3c1e81ff766513d3b", size = 1414659, 
upload-time = "2025-04-04T12:03:17.652Z" }, + { url = "https://files.pythonhosted.org/packages/ef/ea/813af9c42ae21845c1ccfe495bd29c067622a621e85d7cda6bc437de8101/pyzmq-26.4.0-cp310-cp310-win32.whl", hash = "sha256:dea1c8db78fb1b4b7dc9f8e213d0af3fc8ecd2c51a1d5a3ca1cde1bda034a980", size = 580348, upload-time = "2025-04-04T12:03:19.384Z" }, + { url = "https://files.pythonhosted.org/packages/20/68/318666a89a565252c81d3fed7f3b4c54bd80fd55c6095988dfa2cd04a62b/pyzmq-26.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:fa59e1f5a224b5e04dc6c101d7186058efa68288c2d714aa12d27603ae93318b", size = 643838, upload-time = "2025-04-04T12:03:20.795Z" }, + { url = "https://files.pythonhosted.org/packages/91/f8/fb1a15b5f4ecd3e588bfde40c17d32ed84b735195b5c7d1d7ce88301a16f/pyzmq-26.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:a651fe2f447672f4a815e22e74630b6b1ec3a1ab670c95e5e5e28dcd4e69bbb5", size = 559565, upload-time = "2025-04-04T12:03:22.676Z" }, + { url = "https://files.pythonhosted.org/packages/32/6d/234e3b0aa82fd0290b1896e9992f56bdddf1f97266110be54d0177a9d2d9/pyzmq-26.4.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:bfcf82644c9b45ddd7cd2a041f3ff8dce4a0904429b74d73a439e8cab1bd9e54", size = 1339723, upload-time = "2025-04-04T12:03:24.358Z" }, + { url = "https://files.pythonhosted.org/packages/4f/11/6d561efe29ad83f7149a7cd48e498e539ed09019c6cd7ecc73f4cc725028/pyzmq-26.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcae3979b2654d5289d3490742378b2f3ce804b0b5fd42036074e2bf35b030", size = 672645, upload-time = "2025-04-04T12:03:25.693Z" }, + { url = "https://files.pythonhosted.org/packages/19/fd/81bfe3e23f418644660bad1a90f0d22f0b3eebe33dd65a79385530bceb3d/pyzmq-26.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccdff8ac4246b6fb60dcf3982dfaeeff5dd04f36051fe0632748fc0aa0679c01", size = 910133, upload-time = "2025-04-04T12:03:27.625Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/68/321b9c775595ea3df832a9516252b653fe32818db66fdc8fa31c9b9fce37/pyzmq-26.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4550af385b442dc2d55ab7717837812799d3674cb12f9a3aa897611839c18e9e", size = 867428, upload-time = "2025-04-04T12:03:29.004Z" }, + { url = "https://files.pythonhosted.org/packages/4e/6e/159cbf2055ef36aa2aa297e01b24523176e5b48ead283c23a94179fb2ba2/pyzmq-26.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:2f9f7ffe9db1187a253fca95191854b3fda24696f086e8789d1d449308a34b88", size = 862409, upload-time = "2025-04-04T12:03:31.032Z" }, + { url = "https://files.pythonhosted.org/packages/05/1c/45fb8db7be5a7d0cadea1070a9cbded5199a2d578de2208197e592f219bd/pyzmq-26.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3709c9ff7ba61589b7372923fd82b99a81932b592a5c7f1a24147c91da9a68d6", size = 1205007, upload-time = "2025-04-04T12:03:32.687Z" }, + { url = "https://files.pythonhosted.org/packages/f8/fa/658c7f583af6498b463f2fa600f34e298e1b330886f82f1feba0dc2dd6c3/pyzmq-26.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f8f3c30fb2d26ae5ce36b59768ba60fb72507ea9efc72f8f69fa088450cff1df", size = 1514599, upload-time = "2025-04-04T12:03:34.084Z" }, + { url = "https://files.pythonhosted.org/packages/4d/d7/44d641522353ce0a2bbd150379cb5ec32f7120944e6bfba4846586945658/pyzmq-26.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:382a4a48c8080e273427fc692037e3f7d2851959ffe40864f2db32646eeb3cef", size = 1414546, upload-time = "2025-04-04T12:03:35.478Z" }, + { url = "https://files.pythonhosted.org/packages/72/76/c8ed7263218b3d1e9bce07b9058502024188bd52cc0b0a267a9513b431fc/pyzmq-26.4.0-cp311-cp311-win32.whl", hash = "sha256:d56aad0517d4c09e3b4f15adebba8f6372c5102c27742a5bdbfc74a7dceb8fca", size = 579247, upload-time = "2025-04-04T12:03:36.846Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/d0/2d9abfa2571a0b1a67c0ada79a8aa1ba1cce57992d80f771abcdf99bb32c/pyzmq-26.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:963977ac8baed7058c1e126014f3fe58b3773f45c78cce7af5c26c09b6823896", size = 644727, upload-time = "2025-04-04T12:03:38.578Z" }, + { url = "https://files.pythonhosted.org/packages/0d/d1/c8ad82393be6ccedfc3c9f3adb07f8f3976e3c4802640fe3f71441941e70/pyzmq-26.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0c8e8cadc81e44cc5088fcd53b9b3b4ce9344815f6c4a03aec653509296fae3", size = 559942, upload-time = "2025-04-04T12:03:40.143Z" }, + { url = "https://files.pythonhosted.org/packages/10/44/a778555ebfdf6c7fc00816aad12d185d10a74d975800341b1bc36bad1187/pyzmq-26.4.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:5227cb8da4b6f68acfd48d20c588197fd67745c278827d5238c707daf579227b", size = 1341586, upload-time = "2025-04-04T12:03:41.954Z" }, + { url = "https://files.pythonhosted.org/packages/9c/4f/f3a58dc69ac757e5103be3bd41fb78721a5e17da7cc617ddb56d973a365c/pyzmq-26.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1c07a7fa7f7ba86554a2b1bef198c9fed570c08ee062fd2fd6a4dcacd45f905", size = 665880, upload-time = "2025-04-04T12:03:43.45Z" }, + { url = "https://files.pythonhosted.org/packages/fe/45/50230bcfb3ae5cb98bee683b6edeba1919f2565d7cc1851d3c38e2260795/pyzmq-26.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae775fa83f52f52de73183f7ef5395186f7105d5ed65b1ae65ba27cb1260de2b", size = 902216, upload-time = "2025-04-04T12:03:45.572Z" }, + { url = "https://files.pythonhosted.org/packages/41/59/56bbdc5689be5e13727491ad2ba5efd7cd564365750514f9bc8f212eef82/pyzmq-26.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c760d0226ebd52f1e6b644a9e839b5db1e107a23f2fcd46ec0569a4fdd4e63", size = 859814, upload-time = "2025-04-04T12:03:47.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/b1/57db58cfc8af592ce94f40649bd1804369c05b2190e4cbc0a2dad572baeb/pyzmq-26.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ef8c6ecc1d520debc147173eaa3765d53f06cd8dbe7bd377064cdbc53ab456f5", size = 855889, upload-time = "2025-04-04T12:03:49.223Z" }, + { url = "https://files.pythonhosted.org/packages/e8/92/47542e629cbac8f221c230a6d0f38dd3d9cff9f6f589ed45fdf572ffd726/pyzmq-26.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3150ef4084e163dec29ae667b10d96aad309b668fac6810c9e8c27cf543d6e0b", size = 1197153, upload-time = "2025-04-04T12:03:50.591Z" }, + { url = "https://files.pythonhosted.org/packages/07/e5/b10a979d1d565d54410afc87499b16c96b4a181af46e7645ab4831b1088c/pyzmq-26.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4448c9e55bf8329fa1dcedd32f661bf611214fa70c8e02fee4347bc589d39a84", size = 1507352, upload-time = "2025-04-04T12:03:52.473Z" }, + { url = "https://files.pythonhosted.org/packages/ab/58/5a23db84507ab9c01c04b1232a7a763be66e992aa2e66498521bbbc72a71/pyzmq-26.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e07dde3647afb084d985310d067a3efa6efad0621ee10826f2cb2f9a31b89d2f", size = 1406834, upload-time = "2025-04-04T12:03:54Z" }, + { url = "https://files.pythonhosted.org/packages/22/74/aaa837b331580c13b79ac39396601fb361454ee184ca85e8861914769b99/pyzmq-26.4.0-cp312-cp312-win32.whl", hash = "sha256:ba034a32ecf9af72adfa5ee383ad0fd4f4e38cdb62b13624278ef768fe5b5b44", size = 577992, upload-time = "2025-04-04T12:03:55.815Z" }, + { url = "https://files.pythonhosted.org/packages/30/0f/55f8c02c182856743b82dde46b2dc3e314edda7f1098c12a8227eeda0833/pyzmq-26.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:056a97aab4064f526ecb32f4343917a4022a5d9efb6b9df990ff72e1879e40be", size = 640466, upload-time = "2025-04-04T12:03:57.231Z" }, + { url = "https://files.pythonhosted.org/packages/e4/29/073779afc3ef6f830b8de95026ef20b2d1ec22d0324d767748d806e57379/pyzmq-26.4.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:2f23c750e485ce1eb639dbd576d27d168595908aa2d60b149e2d9e34c9df40e0", size = 556342, upload-time = "2025-04-04T12:03:59.218Z" }, + { url = "https://files.pythonhosted.org/packages/d7/20/fb2c92542488db70f833b92893769a569458311a76474bda89dc4264bd18/pyzmq-26.4.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:c43fac689880f5174d6fc864857d1247fe5cfa22b09ed058a344ca92bf5301e3", size = 1339484, upload-time = "2025-04-04T12:04:00.671Z" }, + { url = "https://files.pythonhosted.org/packages/58/29/2f06b9cabda3a6ea2c10f43e67ded3e47fc25c54822e2506dfb8325155d4/pyzmq-26.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:902aca7eba477657c5fb81c808318460328758e8367ecdd1964b6330c73cae43", size = 666106, upload-time = "2025-04-04T12:04:02.366Z" }, + { url = "https://files.pythonhosted.org/packages/77/e4/dcf62bd29e5e190bd21bfccaa4f3386e01bf40d948c239239c2f1e726729/pyzmq-26.4.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e48a830bfd152fe17fbdeaf99ac5271aa4122521bf0d275b6b24e52ef35eb6", size = 902056, upload-time = "2025-04-04T12:04:03.919Z" }, + { url = "https://files.pythonhosted.org/packages/1a/cf/b36b3d7aea236087d20189bec1a87eeb2b66009731d7055e5c65f845cdba/pyzmq-26.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31be2b6de98c824c06f5574331f805707c667dc8f60cb18580b7de078479891e", size = 860148, upload-time = "2025-04-04T12:04:05.581Z" }, + { url = "https://files.pythonhosted.org/packages/18/a6/f048826bc87528c208e90604c3bf573801e54bd91e390cbd2dfa860e82dc/pyzmq-26.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6332452034be001bbf3206ac59c0d2a7713de5f25bb38b06519fc6967b7cf771", size = 855983, upload-time = "2025-04-04T12:04:07.096Z" }, + { url = "https://files.pythonhosted.org/packages/0a/27/454d34ab6a1d9772a36add22f17f6b85baf7c16e14325fa29e7202ca8ee8/pyzmq-26.4.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:da8c0f5dd352136853e6a09b1b986ee5278dfddfebd30515e16eae425c872b30", size = 1197274, upload-time = "2025-04-04T12:04:08.523Z" }, + { url = "https://files.pythonhosted.org/packages/f4/3d/7abfeab6b83ad38aa34cbd57c6fc29752c391e3954fd12848bd8d2ec0df6/pyzmq-26.4.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f4ccc1a0a2c9806dda2a2dd118a3b7b681e448f3bb354056cad44a65169f6d86", size = 1507120, upload-time = "2025-04-04T12:04:10.58Z" }, + { url = "https://files.pythonhosted.org/packages/13/ff/bc8d21dbb9bc8705126e875438a1969c4f77e03fc8565d6901c7933a3d01/pyzmq-26.4.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1c0b5fceadbab461578daf8d1dcc918ebe7ddd2952f748cf30c7cf2de5d51101", size = 1406738, upload-time = "2025-04-04T12:04:12.509Z" }, + { url = "https://files.pythonhosted.org/packages/f5/5d/d4cd85b24de71d84d81229e3bbb13392b2698432cf8fdcea5afda253d587/pyzmq-26.4.0-cp313-cp313-win32.whl", hash = "sha256:28e2b0ff5ba4b3dd11062d905682bad33385cfa3cc03e81abd7f0822263e6637", size = 577826, upload-time = "2025-04-04T12:04:14.289Z" }, + { url = "https://files.pythonhosted.org/packages/c6/6c/f289c1789d7bb6e5a3b3bef7b2a55089b8561d17132be7d960d3ff33b14e/pyzmq-26.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:23ecc9d241004c10e8b4f49d12ac064cd7000e1643343944a10df98e57bc544b", size = 640406, upload-time = "2025-04-04T12:04:15.757Z" }, + { url = "https://files.pythonhosted.org/packages/b3/99/676b8851cb955eb5236a0c1e9ec679ea5ede092bf8bf2c8a68d7e965cac3/pyzmq-26.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:1edb0385c7f025045d6e0f759d4d3afe43c17a3d898914ec6582e6f464203c08", size = 556216, upload-time = "2025-04-04T12:04:17.212Z" }, + { url = "https://files.pythonhosted.org/packages/65/c2/1fac340de9d7df71efc59d9c50fc7a635a77b103392d1842898dd023afcb/pyzmq-26.4.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:93a29e882b2ba1db86ba5dd5e88e18e0ac6b627026c5cfbec9983422011b82d4", size = 1333769, upload-time = "2025-04-04T12:04:18.665Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/c7/6c03637e8d742c3b00bec4f5e4cd9d1c01b2f3694c6f140742e93ca637ed/pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45684f276f57110bb89e4300c00f1233ca631f08f5f42528a5c408a79efc4a", size = 658826, upload-time = "2025-04-04T12:04:20.405Z" }, + { url = "https://files.pythonhosted.org/packages/a5/97/a8dca65913c0f78e0545af2bb5078aebfc142ca7d91cdaffa1fbc73e5dbd/pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72073e75260cb301aad4258ad6150fa7f57c719b3f498cb91e31df16784d89b", size = 891650, upload-time = "2025-04-04T12:04:22.413Z" }, + { url = "https://files.pythonhosted.org/packages/7d/7e/f63af1031eb060bf02d033732b910fe48548dcfdbe9c785e9f74a6cc6ae4/pyzmq-26.4.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be37e24b13026cfedd233bcbbccd8c0bcd2fdd186216094d095f60076201538d", size = 849776, upload-time = "2025-04-04T12:04:23.959Z" }, + { url = "https://files.pythonhosted.org/packages/f6/fa/1a009ce582802a895c0d5fe9413f029c940a0a8ee828657a3bb0acffd88b/pyzmq-26.4.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:237b283044934d26f1eeff4075f751b05d2f3ed42a257fc44386d00df6a270cf", size = 842516, upload-time = "2025-04-04T12:04:25.449Z" }, + { url = "https://files.pythonhosted.org/packages/6e/bc/f88b0bad0f7a7f500547d71e99f10336f2314e525d4ebf576a1ea4a1d903/pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:b30f862f6768b17040929a68432c8a8be77780317f45a353cb17e423127d250c", size = 1189183, upload-time = "2025-04-04T12:04:27.035Z" }, + { url = "https://files.pythonhosted.org/packages/d9/8c/db446a3dd9cf894406dec2e61eeffaa3c07c3abb783deaebb9812c4af6a5/pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:c80fcd3504232f13617c6ab501124d373e4895424e65de8b72042333316f64a8", size = 1495501, upload-time = "2025-04-04T12:04:28.833Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/4c/bf3cad0d64c3214ac881299c4562b815f05d503bccc513e3fd4fdc6f67e4/pyzmq-26.4.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:26a2a7451606b87f67cdeca2c2789d86f605da08b4bd616b1a9981605ca3a364", size = 1395540, upload-time = "2025-04-04T12:04:30.562Z" }, + { url = "https://files.pythonhosted.org/packages/06/91/21d3af57bc77e86e9d1e5384f256fd25cdb4c8eed4c45c8119da8120915f/pyzmq-26.4.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:a88643de8abd000ce99ca72056a1a2ae15881ee365ecb24dd1d9111e43d57842", size = 1340634, upload-time = "2025-04-04T12:04:47.661Z" }, + { url = "https://files.pythonhosted.org/packages/54/e6/58cd825023e998a0e49db7322b3211e6cf93f0796710b77d1496304c10d1/pyzmq-26.4.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0a744ce209ecb557406fb928f3c8c55ce79b16c3eeb682da38ef5059a9af0848", size = 907880, upload-time = "2025-04-04T12:04:49.294Z" }, + { url = "https://files.pythonhosted.org/packages/72/83/619e44a766ef738cb7e8ed8e5a54565627801bdb027ca6dfb70762385617/pyzmq-26.4.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9434540f333332224ecb02ee6278b6c6f11ea1266b48526e73c903119b2f420f", size = 863003, upload-time = "2025-04-04T12:04:51Z" }, + { url = "https://files.pythonhosted.org/packages/b6/6a/a59af31320598bdc63d2c5a3181d14a89673c2c794540678285482e8a342/pyzmq-26.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c6f0a23e55cd38d27d4c89add963294ea091ebcb104d7fdab0f093bc5abb1c", size = 673432, upload-time = "2025-04-04T12:04:52.611Z" }, + { url = "https://files.pythonhosted.org/packages/29/ae/64dd6c18b08ce2cb009c60f11cf01c87f323acd80344d8b059c0304a7370/pyzmq-26.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6145df55dc2309f6ef72d70576dcd5aabb0fd373311613fe85a5e547c722b780", size = 1205221, upload-time = "2025-04-04T12:04:54.31Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/0b/c583ab750957b025244a66948831bc9ca486d11c820da4626caf6480ee1a/pyzmq-26.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2ea81823840ef8c56e5d2f9918e4d571236294fea4d1842b302aebffb9e40997", size = 1515299, upload-time = "2025-04-04T12:04:56.063Z" }, + { url = "https://files.pythonhosted.org/packages/22/ba/95ba76292c49dd9c6dff1f127b4867033020b708d101cba6e4fc5a3d166d/pyzmq-26.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc2abc385dc37835445abe206524fbc0c9e3fce87631dfaa90918a1ba8f425eb", size = 1415366, upload-time = "2025-04-04T12:04:58.241Z" }, + { url = "https://files.pythonhosted.org/packages/6e/65/51abe36169effda26ac7400ffac96f463e09dff40d344cdc2629d9a59162/pyzmq-26.4.0-cp39-cp39-win32.whl", hash = "sha256:41a2508fe7bed4c76b4cf55aacfb8733926f59d440d9ae2b81ee8220633b4d12", size = 580773, upload-time = "2025-04-04T12:04:59.786Z" }, + { url = "https://files.pythonhosted.org/packages/89/68/d9ac94086c63a0ed8d73e9e8aec54b39f481696698a5a939a7207629fb30/pyzmq-26.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:d4000e8255d6cbce38982e5622ebb90823f3409b7ffe8aeae4337ef7d6d2612a", size = 644340, upload-time = "2025-04-04T12:05:01.389Z" }, + { url = "https://files.pythonhosted.org/packages/dc/8f/66c261d657c1b0791ee5b372c90b1646b453adb581fcdc1dc5c94e5b03e3/pyzmq-26.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:b4f6919d9c120488246bdc2a2f96662fa80d67b35bd6d66218f457e722b3ff64", size = 560075, upload-time = "2025-04-04T12:05:02.975Z" }, + { url = "https://files.pythonhosted.org/packages/47/03/96004704a84095f493be8d2b476641f5c967b269390173f85488a53c1c13/pyzmq-26.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:98d948288ce893a2edc5ec3c438fe8de2daa5bbbd6e2e865ec5f966e237084ba", size = 834408, upload-time = "2025-04-04T12:05:04.569Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/7f/68d8f3034a20505db7551cb2260248be28ca66d537a1ac9a257913d778e4/pyzmq-26.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9f34f5c9e0203ece706a1003f1492a56c06c0632d86cb77bcfe77b56aacf27b", size = 569580, upload-time = "2025-04-04T12:05:06.283Z" }, + { url = "https://files.pythonhosted.org/packages/9b/a6/2b0d6801ec33f2b2a19dd8d02e0a1e8701000fec72926e6787363567d30c/pyzmq-26.4.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80c9b48aef586ff8b698359ce22f9508937c799cc1d2c9c2f7c95996f2300c94", size = 798250, upload-time = "2025-04-04T12:05:07.88Z" }, + { url = "https://files.pythonhosted.org/packages/96/2a/0322b3437de977dcac8a755d6d7ce6ec5238de78e2e2d9353730b297cf12/pyzmq-26.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f2a5b74009fd50b53b26f65daff23e9853e79aa86e0aa08a53a7628d92d44a", size = 756758, upload-time = "2025-04-04T12:05:09.483Z" }, + { url = "https://files.pythonhosted.org/packages/c2/33/43704f066369416d65549ccee366cc19153911bec0154da7c6b41fca7e78/pyzmq-26.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:61c5f93d7622d84cb3092d7f6398ffc77654c346545313a3737e266fc11a3beb", size = 555371, upload-time = "2025-04-04T12:05:11.062Z" }, + { url = "https://files.pythonhosted.org/packages/04/52/a70fcd5592715702248306d8e1729c10742c2eac44529984413b05c68658/pyzmq-26.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4478b14cb54a805088299c25a79f27eaf530564a7a4f72bf432a040042b554eb", size = 834405, upload-time = "2025-04-04T12:05:13.3Z" }, + { url = "https://files.pythonhosted.org/packages/25/f9/1a03f1accff16b3af1a6fa22cbf7ced074776abbf688b2e9cb4629700c62/pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a28ac29c60e4ba84b5f58605ace8ad495414a724fe7aceb7cf06cd0598d04e1", size = 569578, upload-time = "2025-04-04T12:05:15.36Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/0c/3a633acd762aa6655fcb71fa841907eae0ab1e8582ff494b137266de341d/pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43b03c1ceea27c6520124f4fb2ba9c647409b9abdf9a62388117148a90419494", size = 798248, upload-time = "2025-04-04T12:05:17.376Z" }, + { url = "https://files.pythonhosted.org/packages/cd/cc/6c99c84aa60ac1cc56747bed6be8ce6305b9b861d7475772e7a25ce019d3/pyzmq-26.4.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7731abd23a782851426d4e37deb2057bf9410848a4459b5ede4fe89342e687a9", size = 756757, upload-time = "2025-04-04T12:05:19.19Z" }, + { url = "https://files.pythonhosted.org/packages/13/9c/d8073bd898eb896e94c679abe82e47506e2b750eb261cf6010ced869797c/pyzmq-26.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a222ad02fbe80166b0526c038776e8042cd4e5f0dec1489a006a1df47e9040e0", size = 555371, upload-time = "2025-04-04T12:05:20.702Z" }, + { url = "https://files.pythonhosted.org/packages/af/b2/71a644b629e1a93ccae9e22a45aec9d23065dfcc24c399cb837f81cd08c2/pyzmq-26.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:552b0d2e39987733e1e9e948a0ced6ff75e0ea39ab1a1db2fc36eb60fd8760db", size = 834397, upload-time = "2025-04-04T12:05:31.217Z" }, + { url = "https://files.pythonhosted.org/packages/a9/dd/052a25651eaaff8f5fd652fb40a3abb400e71207db2d605cf6faf0eac598/pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd670a8aa843f2ee637039bbd412e0d7294a5e588e1ecc9ad98b0cdc050259a4", size = 569571, upload-time = "2025-04-04T12:05:32.877Z" }, + { url = "https://files.pythonhosted.org/packages/a5/5d/201ca10b5d12ab187a418352c06d70c3e2087310af038b11056aba1359be/pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d367b7b775a0e1e54a59a2ba3ed4d5e0a31566af97cc9154e34262777dab95ed", size = 798243, upload-time = "2025-04-04T12:05:34.91Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/d4/2c64e54749536ad1633400f28d71e71e19375d00ce1fe9bb1123364dc927/pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112af16c406e4a93df2caef49f884f4c2bb2b558b0b5577ef0b2465d15c1abc", size = 756751, upload-time = "2025-04-04T12:05:37.12Z" }, + { url = "https://files.pythonhosted.org/packages/08/e6/34d119af43d06a8dcd88bf7a62dac69597eaba52b49ecce76ff06b40f1fd/pyzmq-26.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c76c298683f82669cab0b6da59071f55238c039738297c69f187a542c6d40099", size = 745400, upload-time = "2025-04-04T12:05:40.694Z" }, + { url = "https://files.pythonhosted.org/packages/f8/49/b5e471d74a63318e51f30d329b17d2550bdededaab55baed2e2499de7ce4/pyzmq-26.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:49b6ca2e625b46f499fb081aaf7819a177f41eeb555acb05758aa97f4f95d147", size = 555367, upload-time = "2025-04-04T12:05:42.356Z" }, +] + +[[package]] +name = "referencing" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = 
"idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/ea/a9387748e2d111c3c2b275ba970b735e04e15cdb1eb30693b6b5708c4dbd/rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b", size = 5513, upload-time = "2021-05-12T16:37:54.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490, upload-time = "2021-05-12T16:37:52.536Z" }, +] + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/88/f270de456dd7d11dcc808abfa291ecdd3f45ff44e3b549ffa01b126464d0/rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055", size = 6760, upload-time = "2019-10-28T16:00:19.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/51/17023c0f8f1869d8806b979a2bffa3f861f26a3f1a66b094288323fba52f/rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = 
"sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9", size = 4242, upload-time = "2019-10-28T16:00:13.976Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.25.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/a6/60184b7fc00dd3ca80ac635dd5b8577d444c57e8e8742cecabfacb829921/rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3", size = 27304, upload-time = "2025-05-21T12:46:12.502Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/09/e1158988e50905b7f8306487a576b52d32aa9a87f79f7ab24ee8db8b6c05/rpds_py-0.25.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f4ad628b5174d5315761b67f212774a32f5bad5e61396d38108bd801c0a8f5d9", size = 373140, upload-time = "2025-05-21T12:42:38.834Z" }, + { url = "https://files.pythonhosted.org/packages/e0/4b/a284321fb3c45c02fc74187171504702b2934bfe16abab89713eedfe672e/rpds_py-0.25.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c742af695f7525e559c16f1562cf2323db0e3f0fbdcabdf6865b095256b2d40", size = 358860, upload-time = "2025-05-21T12:42:41.394Z" }, + { url = "https://files.pythonhosted.org/packages/4e/46/8ac9811150c75edeae9fc6fa0e70376c19bc80f8e1f7716981433905912b/rpds_py-0.25.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:605ffe7769e24b1800b4d024d24034405d9404f0bc2f55b6db3362cd34145a6f", size = 386179, upload-time = "2025-05-21T12:42:43.213Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ec/87eb42d83e859bce91dcf763eb9f2ab117142a49c9c3d17285440edb5b69/rpds_py-0.25.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc6f3ddef93243538be76f8e47045b4aad7a66a212cd3a0f23e34469473d36b", size = 400282, upload-time = "2025-05-21T12:42:44.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/68/c8/2a38e0707d7919c8c78e1d582ab15cf1255b380bcb086ca265b73ed6db23/rpds_py-0.25.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f70316f760174ca04492b5ab01be631a8ae30cadab1d1081035136ba12738cfa", size = 521824, upload-time = "2025-05-21T12:42:46.856Z" }, + { url = "https://files.pythonhosted.org/packages/5e/2c/6a92790243569784dde84d144bfd12bd45102f4a1c897d76375076d730ab/rpds_py-0.25.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1dafef8df605fdb46edcc0bf1573dea0d6d7b01ba87f85cd04dc855b2b4479e", size = 411644, upload-time = "2025-05-21T12:42:48.838Z" }, + { url = "https://files.pythonhosted.org/packages/eb/76/66b523ffc84cf47db56efe13ae7cf368dee2bacdec9d89b9baca5e2e6301/rpds_py-0.25.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0701942049095741a8aeb298a31b203e735d1c61f4423511d2b1a41dcd8a16da", size = 386955, upload-time = "2025-05-21T12:42:50.835Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b9/a362d7522feaa24dc2b79847c6175daa1c642817f4a19dcd5c91d3e2c316/rpds_py-0.25.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e87798852ae0b37c88babb7f7bbbb3e3fecc562a1c340195b44c7e24d403e380", size = 421039, upload-time = "2025-05-21T12:42:52.348Z" }, + { url = "https://files.pythonhosted.org/packages/0f/c4/b5b6f70b4d719b6584716889fd3413102acf9729540ee76708d56a76fa97/rpds_py-0.25.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3bcce0edc1488906c2d4c75c94c70a0417e83920dd4c88fec1078c94843a6ce9", size = 563290, upload-time = "2025-05-21T12:42:54.404Z" }, + { url = "https://files.pythonhosted.org/packages/87/a3/2e6e816615c12a8f8662c9d8583a12eb54c52557521ef218cbe3095a8afa/rpds_py-0.25.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e2f6a2347d3440ae789505693a02836383426249d5293541cd712e07e7aecf54", size = 592089, upload-time = "2025-05-21T12:42:55.976Z" }, + { url = 
"https://files.pythonhosted.org/packages/c0/08/9b8e1050e36ce266135994e2c7ec06e1841f1c64da739daeb8afe9cb77a4/rpds_py-0.25.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4fd52d3455a0aa997734f3835cbc4c9f32571345143960e7d7ebfe7b5fbfa3b2", size = 558400, upload-time = "2025-05-21T12:42:58.032Z" }, + { url = "https://files.pythonhosted.org/packages/f2/df/b40b8215560b8584baccd839ff5c1056f3c57120d79ac41bd26df196da7e/rpds_py-0.25.1-cp310-cp310-win32.whl", hash = "sha256:3f0b1798cae2bbbc9b9db44ee068c556d4737911ad53a4e5093d09d04b3bbc24", size = 219741, upload-time = "2025-05-21T12:42:59.479Z" }, + { url = "https://files.pythonhosted.org/packages/10/99/e4c58be18cf5d8b40b8acb4122bc895486230b08f978831b16a3916bd24d/rpds_py-0.25.1-cp310-cp310-win_amd64.whl", hash = "sha256:3ebd879ab996537fc510a2be58c59915b5dd63bccb06d1ef514fee787e05984a", size = 231553, upload-time = "2025-05-21T12:43:01.425Z" }, + { url = "https://files.pythonhosted.org/packages/95/e1/df13fe3ddbbea43567e07437f097863b20c99318ae1f58a0fe389f763738/rpds_py-0.25.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5f048bbf18b1f9120685c6d6bb70cc1a52c8cc11bdd04e643d28d3be0baf666d", size = 373341, upload-time = "2025-05-21T12:43:02.978Z" }, + { url = "https://files.pythonhosted.org/packages/7a/58/deef4d30fcbcbfef3b6d82d17c64490d5c94585a2310544ce8e2d3024f83/rpds_py-0.25.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fbb0dbba559959fcb5d0735a0f87cdbca9e95dac87982e9b95c0f8f7ad10255", size = 359111, upload-time = "2025-05-21T12:43:05.128Z" }, + { url = "https://files.pythonhosted.org/packages/bb/7e/39f1f4431b03e96ebaf159e29a0f82a77259d8f38b2dd474721eb3a8ac9b/rpds_py-0.25.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4ca54b9cf9d80b4016a67a0193ebe0bcf29f6b0a96f09db942087e294d3d4c2", size = 386112, upload-time = "2025-05-21T12:43:07.13Z" }, + { url = 
"https://files.pythonhosted.org/packages/db/e7/847068a48d63aec2ae695a1646089620b3b03f8ccf9f02c122ebaf778f3c/rpds_py-0.25.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee3e26eb83d39b886d2cb6e06ea701bba82ef30a0de044d34626ede51ec98b0", size = 400362, upload-time = "2025-05-21T12:43:08.693Z" }, + { url = "https://files.pythonhosted.org/packages/3b/3d/9441d5db4343d0cee759a7ab4d67420a476cebb032081763de934719727b/rpds_py-0.25.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89706d0683c73a26f76a5315d893c051324d771196ae8b13e6ffa1ffaf5e574f", size = 522214, upload-time = "2025-05-21T12:43:10.694Z" }, + { url = "https://files.pythonhosted.org/packages/a2/ec/2cc5b30d95f9f1a432c79c7a2f65d85e52812a8f6cbf8768724571710786/rpds_py-0.25.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2013ee878c76269c7b557a9a9c042335d732e89d482606990b70a839635feb7", size = 411491, upload-time = "2025-05-21T12:43:12.739Z" }, + { url = "https://files.pythonhosted.org/packages/dc/6c/44695c1f035077a017dd472b6a3253553780837af2fac9b6ac25f6a5cb4d/rpds_py-0.25.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45e484db65e5380804afbec784522de84fa95e6bb92ef1bd3325d33d13efaebd", size = 386978, upload-time = "2025-05-21T12:43:14.25Z" }, + { url = "https://files.pythonhosted.org/packages/b1/74/b4357090bb1096db5392157b4e7ed8bb2417dc7799200fcbaee633a032c9/rpds_py-0.25.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:48d64155d02127c249695abb87d39f0faf410733428d499867606be138161d65", size = 420662, upload-time = "2025-05-21T12:43:15.8Z" }, + { url = "https://files.pythonhosted.org/packages/26/dd/8cadbebf47b96e59dfe8b35868e5c38a42272699324e95ed522da09d3a40/rpds_py-0.25.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:048893e902132fd6548a2e661fb38bf4896a89eea95ac5816cf443524a85556f", size = 563385, upload-time = "2025-05-21T12:43:17.78Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/ea/92960bb7f0e7a57a5ab233662f12152085c7dc0d5468534c65991a3d48c9/rpds_py-0.25.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0317177b1e8691ab5879f4f33f4b6dc55ad3b344399e23df2e499de7b10a548d", size = 592047, upload-time = "2025-05-21T12:43:19.457Z" }, + { url = "https://files.pythonhosted.org/packages/61/ad/71aabc93df0d05dabcb4b0c749277881f8e74548582d96aa1bf24379493a/rpds_py-0.25.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bffcf57826d77a4151962bf1701374e0fc87f536e56ec46f1abdd6a903354042", size = 557863, upload-time = "2025-05-21T12:43:21.69Z" }, + { url = "https://files.pythonhosted.org/packages/93/0f/89df0067c41f122b90b76f3660028a466eb287cbe38efec3ea70e637ca78/rpds_py-0.25.1-cp311-cp311-win32.whl", hash = "sha256:cda776f1967cb304816173b30994faaf2fd5bcb37e73118a47964a02c348e1bc", size = 219627, upload-time = "2025-05-21T12:43:23.311Z" }, + { url = "https://files.pythonhosted.org/packages/7c/8d/93b1a4c1baa903d0229374d9e7aa3466d751f1d65e268c52e6039c6e338e/rpds_py-0.25.1-cp311-cp311-win_amd64.whl", hash = "sha256:dc3c1ff0abc91444cd20ec643d0f805df9a3661fcacf9c95000329f3ddf268a4", size = 231603, upload-time = "2025-05-21T12:43:25.145Z" }, + { url = "https://files.pythonhosted.org/packages/cb/11/392605e5247bead2f23e6888e77229fbd714ac241ebbebb39a1e822c8815/rpds_py-0.25.1-cp311-cp311-win_arm64.whl", hash = "sha256:5a3ddb74b0985c4387719fc536faced33cadf2172769540c62e2a94b7b9be1c4", size = 223967, upload-time = "2025-05-21T12:43:26.566Z" }, + { url = "https://files.pythonhosted.org/packages/7f/81/28ab0408391b1dc57393653b6a0cf2014cc282cc2909e4615e63e58262be/rpds_py-0.25.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5ffe453cde61f73fea9430223c81d29e2fbf412a6073951102146c84e19e34c", size = 364647, upload-time = "2025-05-21T12:43:28.559Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9a/7797f04cad0d5e56310e1238434f71fc6939d0bc517192a18bb99a72a95f/rpds_py-0.25.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:115874ae5e2fdcfc16b2aedc95b5eef4aebe91b28e7e21951eda8a5dc0d3461b", size = 350454, upload-time = "2025-05-21T12:43:30.615Z" }, + { url = "https://files.pythonhosted.org/packages/69/3c/93d2ef941b04898011e5d6eaa56a1acf46a3b4c9f4b3ad1bbcbafa0bee1f/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a714bf6e5e81b0e570d01f56e0c89c6375101b8463999ead3a93a5d2a4af91fa", size = 389665, upload-time = "2025-05-21T12:43:32.629Z" }, + { url = "https://files.pythonhosted.org/packages/c1/57/ad0e31e928751dde8903a11102559628d24173428a0f85e25e187defb2c1/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35634369325906bcd01577da4c19e3b9541a15e99f31e91a02d010816b49bfda", size = 403873, upload-time = "2025-05-21T12:43:34.576Z" }, + { url = "https://files.pythonhosted.org/packages/16/ad/c0c652fa9bba778b4f54980a02962748479dc09632e1fd34e5282cf2556c/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4cb2b3ddc16710548801c6fcc0cfcdeeff9dafbc983f77265877793f2660309", size = 525866, upload-time = "2025-05-21T12:43:36.123Z" }, + { url = "https://files.pythonhosted.org/packages/2a/39/3e1839bc527e6fcf48d5fec4770070f872cdee6c6fbc9b259932f4e88a38/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ceca1cf097ed77e1a51f1dbc8d174d10cb5931c188a4505ff9f3e119dfe519b", size = 416886, upload-time = "2025-05-21T12:43:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/7a/95/dd6b91cd4560da41df9d7030a038298a67d24f8ca38e150562644c829c48/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c2cd1a4b0c2b8c5e31ffff50d09f39906fe351389ba143c195566056c13a7ea", size = 390666, upload-time = "2025-05-21T12:43:40.065Z" }, + { url = 
"https://files.pythonhosted.org/packages/64/48/1be88a820e7494ce0a15c2d390ccb7c52212370badabf128e6a7bb4cb802/rpds_py-0.25.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de336a4b164c9188cb23f3703adb74a7623ab32d20090d0e9bf499a2203ad65", size = 425109, upload-time = "2025-05-21T12:43:42.263Z" }, + { url = "https://files.pythonhosted.org/packages/cf/07/3e2a17927ef6d7720b9949ec1b37d1e963b829ad0387f7af18d923d5cfa5/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9fca84a15333e925dd59ce01da0ffe2ffe0d6e5d29a9eeba2148916d1824948c", size = 567244, upload-time = "2025-05-21T12:43:43.846Z" }, + { url = "https://files.pythonhosted.org/packages/d2/e5/76cf010998deccc4f95305d827847e2eae9c568099c06b405cf96384762b/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88ec04afe0c59fa64e2f6ea0dd9657e04fc83e38de90f6de201954b4d4eb59bd", size = 596023, upload-time = "2025-05-21T12:43:45.932Z" }, + { url = "https://files.pythonhosted.org/packages/52/9a/df55efd84403736ba37a5a6377b70aad0fd1cb469a9109ee8a1e21299a1c/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8bd2f19e312ce3e1d2c635618e8a8d8132892bb746a7cf74780a489f0f6cdcb", size = 561634, upload-time = "2025-05-21T12:43:48.263Z" }, + { url = "https://files.pythonhosted.org/packages/ab/aa/dc3620dd8db84454aaf9374bd318f1aa02578bba5e567f5bf6b79492aca4/rpds_py-0.25.1-cp312-cp312-win32.whl", hash = "sha256:e5e2f7280d8d0d3ef06f3ec1b4fd598d386cc6f0721e54f09109a8132182fbfe", size = 222713, upload-time = "2025-05-21T12:43:49.897Z" }, + { url = "https://files.pythonhosted.org/packages/a3/7f/7cef485269a50ed5b4e9bae145f512d2a111ca638ae70cc101f661b4defd/rpds_py-0.25.1-cp312-cp312-win_amd64.whl", hash = "sha256:db58483f71c5db67d643857404da360dce3573031586034b7d59f245144cc192", size = 235280, upload-time = "2025-05-21T12:43:51.893Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/f2/c2d64f6564f32af913bf5f3f7ae41c7c263c5ae4c4e8f1a17af8af66cd46/rpds_py-0.25.1-cp312-cp312-win_arm64.whl", hash = "sha256:6d50841c425d16faf3206ddbba44c21aa3310a0cebc3c1cdfc3e3f4f9f6f5728", size = 225399, upload-time = "2025-05-21T12:43:53.351Z" }, + { url = "https://files.pythonhosted.org/packages/2b/da/323848a2b62abe6a0fec16ebe199dc6889c5d0a332458da8985b2980dffe/rpds_py-0.25.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:659d87430a8c8c704d52d094f5ba6fa72ef13b4d385b7e542a08fc240cb4a559", size = 364498, upload-time = "2025-05-21T12:43:54.841Z" }, + { url = "https://files.pythonhosted.org/packages/1f/b4/4d3820f731c80fd0cd823b3e95b9963fec681ae45ba35b5281a42382c67d/rpds_py-0.25.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68f6f060f0bbdfb0245267da014d3a6da9be127fe3e8cc4a68c6f833f8a23bb1", size = 350083, upload-time = "2025-05-21T12:43:56.428Z" }, + { url = "https://files.pythonhosted.org/packages/d5/b1/3a8ee1c9d480e8493619a437dec685d005f706b69253286f50f498cbdbcf/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:083a9513a33e0b92cf6e7a6366036c6bb43ea595332c1ab5c8ae329e4bcc0a9c", size = 389023, upload-time = "2025-05-21T12:43:57.995Z" }, + { url = "https://files.pythonhosted.org/packages/3b/31/17293edcfc934dc62c3bf74a0cb449ecd549531f956b72287203e6880b87/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:816568614ecb22b18a010c7a12559c19f6fe993526af88e95a76d5a60b8b75fb", size = 403283, upload-time = "2025-05-21T12:43:59.546Z" }, + { url = "https://files.pythonhosted.org/packages/d1/ca/e0f0bc1a75a8925024f343258c8ecbd8828f8997ea2ac71e02f67b6f5299/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c6564c0947a7f52e4792983f8e6cf9bac140438ebf81f527a21d944f2fd0a40", size = 524634, upload-time = "2025-05-21T12:44:01.087Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/03/5d0be919037178fff33a6672ffc0afa04ea1cfcb61afd4119d1b5280ff0f/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c4a128527fe415d73cf1f70a9a688d06130d5810be69f3b553bf7b45e8acf79", size = 416233, upload-time = "2025-05-21T12:44:02.604Z" }, + { url = "https://files.pythonhosted.org/packages/05/7c/8abb70f9017a231c6c961a8941403ed6557664c0913e1bf413cbdc039e75/rpds_py-0.25.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a49e1d7a4978ed554f095430b89ecc23f42014a50ac385eb0c4d163ce213c325", size = 390375, upload-time = "2025-05-21T12:44:04.162Z" }, + { url = "https://files.pythonhosted.org/packages/7a/ac/a87f339f0e066b9535074a9f403b9313fd3892d4a164d5d5f5875ac9f29f/rpds_py-0.25.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d74ec9bc0e2feb81d3f16946b005748119c0f52a153f6db6a29e8cd68636f295", size = 424537, upload-time = "2025-05-21T12:44:06.175Z" }, + { url = "https://files.pythonhosted.org/packages/1f/8f/8d5c1567eaf8c8afe98a838dd24de5013ce6e8f53a01bd47fe8bb06b5533/rpds_py-0.25.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3af5b4cc10fa41e5bc64e5c198a1b2d2864337f8fcbb9a67e747e34002ce812b", size = 566425, upload-time = "2025-05-21T12:44:08.242Z" }, + { url = "https://files.pythonhosted.org/packages/95/33/03016a6be5663b389c8ab0bbbcca68d9e96af14faeff0a04affcb587e776/rpds_py-0.25.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:79dc317a5f1c51fd9c6a0c4f48209c6b8526d0524a6904fc1076476e79b00f98", size = 595197, upload-time = "2025-05-21T12:44:10.449Z" }, + { url = "https://files.pythonhosted.org/packages/33/8d/da9f4d3e208c82fda311bff0cf0a19579afceb77cf456e46c559a1c075ba/rpds_py-0.25.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1521031351865e0181bc585147624d66b3b00a84109b57fcb7a779c3ec3772cd", size = 561244, upload-time = "2025-05-21T12:44:12.387Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/b3/39d5dcf7c5f742ecd6dbc88f6f84ae54184b92f5f387a4053be2107b17f1/rpds_py-0.25.1-cp313-cp313-win32.whl", hash = "sha256:5d473be2b13600b93a5675d78f59e63b51b1ba2d0476893415dfbb5477e65b31", size = 222254, upload-time = "2025-05-21T12:44:14.261Z" }, + { url = "https://files.pythonhosted.org/packages/5f/19/2d6772c8eeb8302c5f834e6d0dfd83935a884e7c5ce16340c7eaf89ce925/rpds_py-0.25.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7b74e92a3b212390bdce1d93da9f6488c3878c1d434c5e751cbc202c5e09500", size = 234741, upload-time = "2025-05-21T12:44:16.236Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/145ada26cfaf86018d0eb304fe55eafdd4f0b6b84530246bb4a7c4fb5c4b/rpds_py-0.25.1-cp313-cp313-win_arm64.whl", hash = "sha256:dd326a81afe332ede08eb39ab75b301d5676802cdffd3a8f287a5f0b694dc3f5", size = 224830, upload-time = "2025-05-21T12:44:17.749Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ca/d435844829c384fd2c22754ff65889c5c556a675d2ed9eb0e148435c6690/rpds_py-0.25.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:a58d1ed49a94d4183483a3ce0af22f20318d4a1434acee255d683ad90bf78129", size = 359668, upload-time = "2025-05-21T12:44:19.322Z" }, + { url = "https://files.pythonhosted.org/packages/1f/01/b056f21db3a09f89410d493d2f6614d87bb162499f98b649d1dbd2a81988/rpds_py-0.25.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f251bf23deb8332823aef1da169d5d89fa84c89f67bdfb566c49dea1fccfd50d", size = 345649, upload-time = "2025-05-21T12:44:20.962Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0f/e0d00dc991e3d40e03ca36383b44995126c36b3eafa0ccbbd19664709c88/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dbd586bfa270c1103ece2109314dd423df1fa3d9719928b5d09e4840cec0d72", size = 384776, upload-time = "2025-05-21T12:44:22.516Z" }, + { url = 
"https://files.pythonhosted.org/packages/9f/a2/59374837f105f2ca79bde3c3cd1065b2f8c01678900924949f6392eab66d/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d273f136e912aa101a9274c3145dcbddbe4bac560e77e6d5b3c9f6e0ed06d34", size = 395131, upload-time = "2025-05-21T12:44:24.147Z" }, + { url = "https://files.pythonhosted.org/packages/9c/dc/48e8d84887627a0fe0bac53f0b4631e90976fd5d35fff8be66b8e4f3916b/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:666fa7b1bd0a3810a7f18f6d3a25ccd8866291fbbc3c9b912b917a6715874bb9", size = 520942, upload-time = "2025-05-21T12:44:25.915Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f5/ee056966aeae401913d37befeeab57a4a43a4f00099e0a20297f17b8f00c/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:921954d7fbf3fccc7de8f717799304b14b6d9a45bbeec5a8d7408ccbf531faf5", size = 411330, upload-time = "2025-05-21T12:44:27.638Z" }, + { url = "https://files.pythonhosted.org/packages/ab/74/b2cffb46a097cefe5d17f94ede7a174184b9d158a0aeb195f39f2c0361e8/rpds_py-0.25.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d86373ff19ca0441ebeb696ef64cb58b8b5cbacffcda5a0ec2f3911732a194", size = 387339, upload-time = "2025-05-21T12:44:29.292Z" }, + { url = "https://files.pythonhosted.org/packages/7f/9a/0ff0b375dcb5161c2b7054e7d0b7575f1680127505945f5cabaac890bc07/rpds_py-0.25.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c8980cde3bb8575e7c956a530f2c217c1d6aac453474bf3ea0f9c89868b531b6", size = 418077, upload-time = "2025-05-21T12:44:30.877Z" }, + { url = "https://files.pythonhosted.org/packages/0d/a1/fda629bf20d6b698ae84c7c840cfb0e9e4200f664fc96e1f456f00e4ad6e/rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8eb8c84ecea987a2523e057c0d950bcb3f789696c0499290b8d7b3107a719d78", size = 562441, upload-time = "2025-05-21T12:44:32.541Z" }, + { url = 
"https://files.pythonhosted.org/packages/20/15/ce4b5257f654132f326f4acd87268e1006cc071e2c59794c5bdf4bebbb51/rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e43a005671a9ed5a650f3bc39e4dbccd6d4326b24fb5ea8be5f3a43a6f576c72", size = 590750, upload-time = "2025-05-21T12:44:34.557Z" }, + { url = "https://files.pythonhosted.org/packages/fb/ab/e04bf58a8d375aeedb5268edcc835c6a660ebf79d4384d8e0889439448b0/rpds_py-0.25.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58f77c60956501a4a627749a6dcb78dac522f249dd96b5c9f1c6af29bfacfb66", size = 558891, upload-time = "2025-05-21T12:44:37.358Z" }, + { url = "https://files.pythonhosted.org/packages/90/82/cb8c6028a6ef6cd2b7991e2e4ced01c854b6236ecf51e81b64b569c43d73/rpds_py-0.25.1-cp313-cp313t-win32.whl", hash = "sha256:2cb9e5b5e26fc02c8a4345048cd9998c2aca7c2712bd1b36da0c72ee969a3523", size = 218718, upload-time = "2025-05-21T12:44:38.969Z" }, + { url = "https://files.pythonhosted.org/packages/b6/97/5a4b59697111c89477d20ba8a44df9ca16b41e737fa569d5ae8bff99e650/rpds_py-0.25.1-cp313-cp313t-win_amd64.whl", hash = "sha256:401ca1c4a20cc0510d3435d89c069fe0a9ae2ee6495135ac46bdd49ec0495763", size = 232218, upload-time = "2025-05-21T12:44:40.512Z" }, + { url = "https://files.pythonhosted.org/packages/89/74/716d42058ef501e2c08f27aa3ff455f6fc1bbbd19a6ab8dea07e6322d217/rpds_py-0.25.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ce4c8e485a3c59593f1a6f683cf0ea5ab1c1dc94d11eea5619e4fb5228b40fbd", size = 373475, upload-time = "2025-05-21T12:44:42.136Z" }, + { url = "https://files.pythonhosted.org/packages/e1/21/3faa9c523e2496a2505d7440b6f24c9166f37cb7ac027cac6cfbda9b4b5f/rpds_py-0.25.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8222acdb51a22929c3b2ddb236b69c59c72af4019d2cba961e2f9add9b6e634", size = 359349, upload-time = "2025-05-21T12:44:43.813Z" }, + { url = 
"https://files.pythonhosted.org/packages/6a/1c/c747fe568d21b1d679079b52b926ebc4d1497457510a1773dc5fd4b7b4e2/rpds_py-0.25.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4593c4eae9b27d22df41cde518b4b9e4464d139e4322e2127daa9b5b981b76be", size = 386526, upload-time = "2025-05-21T12:44:45.452Z" }, + { url = "https://files.pythonhosted.org/packages/0b/cc/4a41703de4fb291f13660fa3d882cbd39db5d60497c6e7fa7f5142e5e69f/rpds_py-0.25.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd035756830c712b64725a76327ce80e82ed12ebab361d3a1cdc0f51ea21acb0", size = 400526, upload-time = "2025-05-21T12:44:47.011Z" }, + { url = "https://files.pythonhosted.org/packages/f1/78/60c980bedcad8418b614f0b4d6d420ecf11225b579cec0cb4e84d168b4da/rpds_py-0.25.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:114a07e85f32b125404f28f2ed0ba431685151c037a26032b213c882f26eb908", size = 525726, upload-time = "2025-05-21T12:44:48.838Z" }, + { url = "https://files.pythonhosted.org/packages/3f/37/f2f36b7f1314b3c3200d663decf2f8e29480492a39ab22447112aead4693/rpds_py-0.25.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dec21e02e6cc932538b5203d3a8bd6aa1480c98c4914cb88eea064ecdbc6396a", size = 412045, upload-time = "2025-05-21T12:44:50.433Z" }, + { url = "https://files.pythonhosted.org/packages/df/96/e03783e87a775b1242477ccbc35895f8e9b2bbdb60e199034a6da03c2687/rpds_py-0.25.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09eab132f41bf792c7a0ea1578e55df3f3e7f61888e340779b06050a9a3f16e9", size = 386953, upload-time = "2025-05-21T12:44:52.092Z" }, + { url = "https://files.pythonhosted.org/packages/7c/7d/1418f4b69bfb4b40481a3d84782113ad7d4cca0b38ae70b982dd5b20102a/rpds_py-0.25.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c98f126c4fc697b84c423e387337d5b07e4a61e9feac494362a59fd7a2d9ed80", size = 421144, upload-time = "2025-05-21T12:44:53.734Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/0e/61469912c6493ee3808012e60f4930344b974fcb6b35c4348e70b6be7bc7/rpds_py-0.25.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0e6a327af8ebf6baba1c10fadd04964c1965d375d318f4435d5f3f9651550f4a", size = 563730, upload-time = "2025-05-21T12:44:55.846Z" }, + { url = "https://files.pythonhosted.org/packages/f6/86/6d0a5cc56481ac61977b7c839677ed5c63d38cf0fcb3e2280843a8a6f476/rpds_py-0.25.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bc120d1132cff853ff617754196d0ac0ae63befe7c8498bd67731ba368abe451", size = 592321, upload-time = "2025-05-21T12:44:57.514Z" }, + { url = "https://files.pythonhosted.org/packages/5d/87/d1e2453fe336f71e6aa296452a8c85c2118b587b1d25ce98014f75838a60/rpds_py-0.25.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:140f61d9bed7839446bdd44852e30195c8e520f81329b4201ceead4d64eb3a9f", size = 558162, upload-time = "2025-05-21T12:44:59.564Z" }, + { url = "https://files.pythonhosted.org/packages/ad/92/349f04b1644c5cef3e2e6c53b7168a28531945f9e6fca7425f6d20ddbc3c/rpds_py-0.25.1-cp39-cp39-win32.whl", hash = "sha256:9c006f3aadeda131b438c3092124bd196b66312f0caa5823ef09585a669cf449", size = 219920, upload-time = "2025-05-21T12:45:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/f2/84/3969bef883a3f37ff2213795257cb7b7e93a115829670befb8de0e003031/rpds_py-0.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:a61d0b2c7c9a0ae45732a77844917b427ff16ad5464b4d4f5e4adb955f582890", size = 231452, upload-time = "2025-05-21T12:45:02.85Z" }, + { url = "https://files.pythonhosted.org/packages/78/ff/566ce53529b12b4f10c0a348d316bd766970b7060b4fd50f888be3b3b281/rpds_py-0.25.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b24bf3cd93d5b6ecfbedec73b15f143596c88ee249fa98cefa9a9dc9d92c6f28", size = 373931, upload-time = "2025-05-21T12:45:05.01Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/5d/deba18503f7c7878e26aa696e97f051175788e19d5336b3b0e76d3ef9256/rpds_py-0.25.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:0eb90e94f43e5085623932b68840b6f379f26db7b5c2e6bcef3179bd83c9330f", size = 359074, upload-time = "2025-05-21T12:45:06.714Z" }, + { url = "https://files.pythonhosted.org/packages/0d/74/313415c5627644eb114df49c56a27edba4d40cfd7c92bd90212b3604ca84/rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d50e4864498a9ab639d6d8854b25e80642bd362ff104312d9770b05d66e5fb13", size = 387255, upload-time = "2025-05-21T12:45:08.669Z" }, + { url = "https://files.pythonhosted.org/packages/8c/c8/c723298ed6338963d94e05c0f12793acc9b91d04ed7c4ba7508e534b7385/rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c9409b47ba0650544b0bb3c188243b83654dfe55dcc173a86832314e1a6a35d", size = 400714, upload-time = "2025-05-21T12:45:10.39Z" }, + { url = "https://files.pythonhosted.org/packages/33/8a/51f1f6aa653c2e110ed482ef2ae94140d56c910378752a1b483af11019ee/rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:796ad874c89127c91970652a4ee8b00d56368b7e00d3477f4415fe78164c8000", size = 523105, upload-time = "2025-05-21T12:45:12.273Z" }, + { url = "https://files.pythonhosted.org/packages/c7/a4/7873d15c088ad3bff36910b29ceb0f178e4b3232c2adbe9198de68a41e63/rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:85608eb70a659bf4c1142b2781083d4b7c0c4e2c90eff11856a9754e965b2540", size = 411499, upload-time = "2025-05-21T12:45:13.95Z" }, + { url = "https://files.pythonhosted.org/packages/90/f3/0ce1437befe1410766d11d08239333ac1b2d940f8a64234ce48a7714669c/rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4feb9211d15d9160bc85fa72fed46432cdc143eb9cf6d5ca377335a921ac37b", size = 387918, upload-time = 
"2025-05-21T12:45:15.649Z" }, + { url = "https://files.pythonhosted.org/packages/94/d4/5551247988b2a3566afb8a9dba3f1d4a3eea47793fd83000276c1a6c726e/rpds_py-0.25.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ccfa689b9246c48947d31dd9d8b16d89a0ecc8e0e26ea5253068efb6c542b76e", size = 421705, upload-time = "2025-05-21T12:45:17.788Z" }, + { url = "https://files.pythonhosted.org/packages/b0/25/5960f28f847bf736cc7ee3c545a7e1d2f3b5edaf82c96fb616c2f5ed52d0/rpds_py-0.25.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:3c5b317ecbd8226887994852e85de562f7177add602514d4ac40f87de3ae45a8", size = 564489, upload-time = "2025-05-21T12:45:19.466Z" }, + { url = "https://files.pythonhosted.org/packages/02/66/1c99884a0d44e8c2904d3c4ec302f995292d5dde892c3bf7685ac1930146/rpds_py-0.25.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:454601988aab2c6e8fd49e7634c65476b2b919647626208e376afcd22019eeb8", size = 592557, upload-time = "2025-05-21T12:45:21.362Z" }, + { url = "https://files.pythonhosted.org/packages/55/ae/4aeac84ebeffeac14abb05b3bb1d2f728d00adb55d3fb7b51c9fa772e760/rpds_py-0.25.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:1c0c434a53714358532d13539272db75a5ed9df75a4a090a753ac7173ec14e11", size = 558691, upload-time = "2025-05-21T12:45:23.084Z" }, + { url = "https://files.pythonhosted.org/packages/41/b3/728a08ff6f5e06fe3bb9af2e770e9d5fd20141af45cff8dfc62da4b2d0b3/rpds_py-0.25.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f73ce1512e04fbe2bc97836e89830d6b4314c171587a99688082d090f934d20a", size = 231651, upload-time = "2025-05-21T12:45:24.72Z" }, + { url = "https://files.pythonhosted.org/packages/49/74/48f3df0715a585cbf5d34919c9c757a4c92c1a9eba059f2d334e72471f70/rpds_py-0.25.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee86d81551ec68a5c25373c5643d343150cc54672b5e9a0cafc93c1870a53954", size = 374208, upload-time = "2025-05-21T12:45:26.306Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/b0/9b01bb11ce01ec03d05e627249cc2c06039d6aa24ea5a22a39c312167c10/rpds_py-0.25.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:89c24300cd4a8e4a51e55c31a8ff3918e6651b241ee8876a42cc2b2a078533ba", size = 359262, upload-time = "2025-05-21T12:45:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/a9/eb/5395621618f723ebd5116c53282052943a726dba111b49cd2071f785b665/rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:771c16060ff4e79584dc48902a91ba79fd93eade3aa3a12d6d2a4aadaf7d542b", size = 387366, upload-time = "2025-05-21T12:45:30.42Z" }, + { url = "https://files.pythonhosted.org/packages/68/73/3d51442bdb246db619d75039a50ea1cf8b5b4ee250c3e5cd5c3af5981cd4/rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:785ffacd0ee61c3e60bdfde93baa6d7c10d86f15655bd706c89da08068dc5038", size = 400759, upload-time = "2025-05-21T12:45:32.516Z" }, + { url = "https://files.pythonhosted.org/packages/b7/4c/3a32d5955d7e6cb117314597bc0f2224efc798428318b13073efe306512a/rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a40046a529cc15cef88ac5ab589f83f739e2d332cb4d7399072242400ed68c9", size = 523128, upload-time = "2025-05-21T12:45:34.396Z" }, + { url = "https://files.pythonhosted.org/packages/be/95/1ffccd3b0bb901ae60b1dd4b1be2ab98bb4eb834cd9b15199888f5702f7b/rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:85fc223d9c76cabe5d0bff82214459189720dc135db45f9f66aa7cffbf9ff6c1", size = 411597, upload-time = "2025-05-21T12:45:36.164Z" }, + { url = "https://files.pythonhosted.org/packages/ef/6d/6e6cd310180689db8b0d2de7f7d1eabf3fb013f239e156ae0d5a1a85c27f/rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0be9965f93c222fb9b4cc254235b3b2b215796c03ef5ee64f995b1b69af0762", size = 388053, upload-time = 
"2025-05-21T12:45:38.45Z" }, + { url = "https://files.pythonhosted.org/packages/4a/87/ec4186b1fe6365ced6fa470960e68fc7804bafbe7c0cf5a36237aa240efa/rpds_py-0.25.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8378fa4a940f3fb509c081e06cb7f7f2adae8cf46ef258b0e0ed7519facd573e", size = 421821, upload-time = "2025-05-21T12:45:40.732Z" }, + { url = "https://files.pythonhosted.org/packages/7a/60/84f821f6bf4e0e710acc5039d91f8f594fae0d93fc368704920d8971680d/rpds_py-0.25.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:33358883a4490287e67a2c391dfaea4d9359860281db3292b6886bf0be3d8692", size = 564534, upload-time = "2025-05-21T12:45:42.672Z" }, + { url = "https://files.pythonhosted.org/packages/41/3a/bc654eb15d3b38f9330fe0f545016ba154d89cdabc6177b0295910cd0ebe/rpds_py-0.25.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1d1fadd539298e70cac2f2cb36f5b8a65f742b9b9f1014dd4ea1f7785e2470bf", size = 592674, upload-time = "2025-05-21T12:45:44.533Z" }, + { url = "https://files.pythonhosted.org/packages/2e/ba/31239736f29e4dfc7a58a45955c5db852864c306131fd6320aea214d5437/rpds_py-0.25.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9a46c2fb2545e21181445515960006e85d22025bd2fe6db23e76daec6eb689fe", size = 558781, upload-time = "2025-05-21T12:45:46.281Z" }, + { url = "https://files.pythonhosted.org/packages/78/b2/198266f070c6760e0e8cd00f9f2b9c86133ceebbe7c6d114bdcfea200180/rpds_py-0.25.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:50f2c501a89c9a5f4e454b126193c5495b9fb441a75b298c60591d8a2eb92e1b", size = 373973, upload-time = "2025-05-21T12:45:48.081Z" }, + { url = "https://files.pythonhosted.org/packages/13/79/1265eae618f88aa5d5e7122bd32dd41700bafe5a8bcea404e998848cd844/rpds_py-0.25.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d779b325cc8238227c47fbc53964c8cc9a941d5dbae87aa007a1f08f2f77b23", size = 359326, upload-time = "2025-05-21T12:45:49.825Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/ab/6913b96f3ac072e87e76e45fe938263b0ab0d78b6b2cef3f2e56067befc0/rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:036ded36bedb727beeabc16dc1dad7cb154b3fa444e936a03b67a86dc6a5066e", size = 387544, upload-time = "2025-05-21T12:45:51.764Z" }, + { url = "https://files.pythonhosted.org/packages/b0/23/129ed12d25229acc6deb8cbe90baadd8762e563c267c9594eb2fcc15be0c/rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:245550f5a1ac98504147cba96ffec8fabc22b610742e9150138e5d60774686d7", size = 400240, upload-time = "2025-05-21T12:45:54.061Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e0/6811a38a5efa46b7ee6ed2103c95cb9abb16991544c3b69007aa679b6944/rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff7c23ba0a88cb7b104281a99476cccadf29de2a0ef5ce864959a52675b1ca83", size = 525599, upload-time = "2025-05-21T12:45:56.457Z" }, + { url = "https://files.pythonhosted.org/packages/6c/10/2dc88bcaa0d86bdb59e017a330b1972ffeeb7f5061bb5a180c9a2bb73bbf/rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e37caa8cdb3b7cf24786451a0bdb853f6347b8b92005eeb64225ae1db54d1c2b", size = 411154, upload-time = "2025-05-21T12:45:58.525Z" }, + { url = "https://files.pythonhosted.org/packages/cf/d1/a72d522eb7d934fb33e9c501e6ecae00e2035af924d4ff37d964e9a3959b/rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2f48ab00181600ee266a095fe815134eb456163f7d6699f525dee471f312cf", size = 388297, upload-time = "2025-05-21T12:46:00.264Z" }, + { url = "https://files.pythonhosted.org/packages/55/90/0dd7169ec74f042405b6b73512200d637a3088c156f64e1c07c18aa2fe59/rpds_py-0.25.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e5fc7484fa7dce57e25063b0ec9638ff02a908304f861d81ea49273e43838c1", size = 421894, upload-time = 
"2025-05-21T12:46:02.065Z" }, + { url = "https://files.pythonhosted.org/packages/37/e9/45170894add451783ed839c5c4a495e050aa8baa06d720364d9dff394dac/rpds_py-0.25.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d3c10228d6cf6fe2b63d2e7985e94f6916fa46940df46b70449e9ff9297bd3d1", size = 564409, upload-time = "2025-05-21T12:46:03.891Z" }, + { url = "https://files.pythonhosted.org/packages/59/d0/31cece9090e76fbdb50c758c165d40da604b03b37c3ba53f010bbfeb130a/rpds_py-0.25.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:5d9e40f32745db28c1ef7aad23f6fc458dc1e29945bd6781060f0d15628b8ddf", size = 592681, upload-time = "2025-05-21T12:46:06.009Z" }, + { url = "https://files.pythonhosted.org/packages/f1/4c/22ef535efb2beec614ba7be83e62b439eb83b0b0d7b1775e22d35af3f9b5/rpds_py-0.25.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:35a8d1a24b5936b35c5003313bc177403d8bdef0f8b24f28b1c4a255f94ea992", size = 558744, upload-time = "2025-05-21T12:46:07.78Z" }, + { url = "https://files.pythonhosted.org/packages/79/ff/f2150efc8daf0581d4dfaf0a2a30b08088b6df900230ee5ae4f7c8cd5163/rpds_py-0.25.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6099263f526efff9cf3883dfef505518730f7a7a93049b1d90d42e50a22b4793", size = 231305, upload-time = "2025-05-21T12:46:10.52Z" }, +] + +[[package]] +name = "semantic-version" +version = "2.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/31/f2289ce78b9b473d582568c234e104d2a342fd658cc288a7553d83bb8595/semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c", size = 52289, upload-time = "2022-05-26T13:35:23.454Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/23/8146aad7d88f4fcb3a6218f41a60f6c2d4e3a72de72da1825dc7c8f7877c/semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177", size = 15552, upload-time = 
"2022-05-26T13:35:21.206Z" }, +] + +[[package]] +name = "send2trash" +version = "1.8.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/3a/aec9b02217bb79b87bbc1a21bc6abc51e3d5dcf65c30487ac96c0908c722/Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf", size = 17394, upload-time = "2024-04-07T00:01:09.267Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/b0/4562db6223154aa4e22f939003cb92514c79f3d4dccca3444253fd17f902/Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9", size = 18072, upload-time = "2024-04-07T00:01:07.438Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "setuptools-rust" +version = "1.11.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "semantic-version" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e0/92/bf8589b1a2b6107cf9ec8daa9954c0b7620643fe1f37d31d75e572d995f5/setuptools_rust-1.11.1.tar.gz", hash = "sha256:7dabc4392252ced314b8050d63276e05fdc5d32398fc7d3cce1f6a6ac35b76c0", size = 310804, upload-time = "2025-04-04T14:28:10.576Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b3/01/37e1376f80578882e4f2d451f57d1fb42a599832057a123f57d9f26395c8/setuptools_rust-1.11.1-py3-none-any.whl", hash = "sha256:5eaaddaed268dc24a527ffa659ce56b22d3cf17b781247b779efd611031fe8ea", size = 28120, upload-time = "2025-04-04T14:28:09.564Z" }, +] + +[[package]] +name = "setuptools-scm" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/19/7ae64b70b2429c48c3a7a4ed36f50f94687d3bfcd0ae2f152367b6410dff/setuptools_scm-8.3.1.tar.gz", hash = "sha256:3d555e92b75dacd037d32bafdf94f97af51ea29ae8c7b234cf94b7a5bd242a63", size = 78088, upload-time = "2025-04-23T11:53:19.739Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/ac/8f96ba9b4cfe3e4ea201f23f4f97165862395e9331a424ed325ae37024a8/setuptools_scm-8.3.1-py3-none-any.whl", hash = "sha256:332ca0d43791b818b841213e76b1971b7711a960761c5bea5fc5cdb5196fbce3", size = 43935, upload-time = "2025-04-23T11:53:17.922Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + 
+[[package]] +name = "slotscheck" +version = "0.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/57/6fcb8df11e7c76eb87b23bfa931408e47f051c6161749c531b4060a45516/slotscheck-0.19.1.tar.gz", hash = "sha256:6146b7747f8db335a00a66b782f86011b74b995f61746dc5b36a9e77d5326013", size = 16050, upload-time = "2024-10-19T13:30:53.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/32/bd569256267f80b76b87d21a09795741a175778b954bee1d7b1a89852b6f/slotscheck-0.19.1-py3-none-any.whl", hash = "sha256:bff9926f8d6408ea21b6c6bbaa4389cea1682962e73ee4f30084b6d2b89260ee", size = 16995, upload-time = "2024-10-19T13:30:51.23Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "snowballstemmer" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/75/a7/9810d872919697c9d01295633f5d574fb416d47e535f258272ca1f01f447/snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895", size = 105575, upload-time = "2025-05-09T16:34:51.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" }, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594, upload-time = "2021-05-16T22:03:42.897Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418, upload-time = "2025-04-20T18:50:08.518Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = 
"2025-04-20T18:50:07.196Z" }, +] + +[[package]] +name = "sphinx" +version = "7.4.7" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "alabaster", version = "0.7.16", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "babel", marker = "python_full_version < '3.10'" }, + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version < '3.10'" }, + { name = "imagesize", marker = "python_full_version < '3.10'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jinja2", marker = "python_full_version < '3.10'" }, + { name = "packaging", marker = "python_full_version < '3.10'" }, + { name = "pygments", marker = "python_full_version < '3.10'" }, + { name = "requests", marker = "python_full_version < '3.10'" }, + { name = "snowballstemmer", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version < '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911, upload-time = "2024-07-20T14:46:56.059Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624, upload-time = "2024-07-20T14:46:52.142Z" }, +] + +[[package]] +name = "sphinx" +version = "8.1.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version == '3.13.*'", + "python_full_version >= '3.11' and python_full_version < '3.13'", + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "babel", marker = "python_full_version >= '3.10'" }, + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version >= '3.10'" }, + { name = "imagesize", marker = "python_full_version >= '3.10'" }, + { name = "jinja2", marker = "python_full_version >= '3.10'" }, + { name = "packaging", marker = "python_full_version >= '3.10'" }, + { name = "pygments", marker = "python_full_version >= '3.10'" }, + { name = "requests", marker = "python_full_version >= '3.10'" }, + { name = "snowballstemmer", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.10'" }, + { name = "tomli", marker = "python_full_version == '3.10.*'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611, upload-time = "2024-10-13T20:27:13.93Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125, upload-time = "2024-10-13T20:27:10.448Z" }, +] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-jquery" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463, upload-time = "2024-11-13T11:06:04.545Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561, upload-time = "2024-11-13T11:06:02.094Z" }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = 
"sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053, upload-time = "2024-07-29T01:09:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300, upload-time = "2024-07-29T01:08:58.99Z" }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967, upload-time = "2024-07-29T01:09:23.417Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530, upload-time = "2024-07-29T01:09:21.945Z" }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617, upload-time = "2024-07-29T01:09:37.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705, upload-time = "2024-07-29T01:09:36.407Z" }, +] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331, upload-time = "2023-03-14T15:01:01.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104, upload-time = "2023-03-14T15:01:00.356Z" }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787, upload-time = "2019-01-21T16:10:16.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = 
"sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165, upload-time = "2024-07-29T01:09:56.435Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743, upload-time = "2024-07-29T01:09:54.885Z" }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080, upload-time = "2024-07-29T01:10:09.332Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072, upload-time = "2024-07-29T01:10:08.203Z" }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = 
"2023-09-30T13:58:03.53Z" }, +] + +[[package]] +name = "stdlibs" +version = "2025.5.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/48/6f/92324b26048ff76b02dbb616d26b51a57e751bac7a7934016bb25a407725/stdlibs-2025.5.10.tar.gz", hash = "sha256:75d55a0b7b070ec44bd7dae5bc1ee1a6cea742122fb4253313cb4ab354f7f0c5", size = 19625, upload-time = "2025-05-11T03:46:42.917Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/51/a8f17bbb8f01cef657153972a99e382ce5c5e33a1a2df959f3ed2ebe2b89/stdlibs-2025.5.10-py3-none-any.whl", hash = "sha256:25178d9c2b45d2680292413bf59a20293355d45056ec92d32ea6ed349ce9e2a1", size = 57264, upload-time = "2025-05-11T03:46:41.633Z" }, +] + +[[package]] +name = "tabulate" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/fe/802052aecb21e3797b8f7902564ab6ea0d60ff8ca23952079064155d1ae1/tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", size = 81090, upload-time = "2022-10-06T17:21:48.54Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252, upload-time = "2022-10-06T17:21:44.262Z" }, +] + +[[package]] +name = "terminado" +version = "0.18.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess", marker = "os_name != 'nt'" }, + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701, upload-time = "2024-03-12T14:34:39.026Z" } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154, upload-time = "2024-03-12T14:34:36.569Z" }, +] + +[[package]] +name = "testslide" +version = "2.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "psutil" }, + { name = "pygments" }, + { name = "typeguard" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ee/6f/c8d6d60a597c693559dab3b3362bd01e2212530e9a163eb0164af81e1ec1/TestSlide-2.7.1.tar.gz", hash = "sha256:d25890d5c383f673fac44a5f9e2561b7118d04f29f2c2b3d4f549e6db94cb34d", size = 50255, upload-time = "2023-03-16T14:09:41.204Z" } + +[[package]] +name = "tinycss2" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085, upload-time = "2024-10-24T14:58:29.895Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, +] + +[[package]] +name = "toml" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, + { url = 
"https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "tornado" +version = "6.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/89/c72771c81d25d53fe33e3dca61c233b665b2780f21820ba6fd2c6793c12b/tornado-6.5.1.tar.gz", hash = "sha256:84ceece391e8eb9b2b95578db65e920d2a61070260594819589609ba9bc6308c", size = 509934, upload-time = "2025-05-22T18:15:38.788Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/77/89/f4532dee6843c9e0ebc4e28d4be04c67f54f60813e4bf73d595fe7567452/tornado-6.5.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d50065ba7fd11d3bd41bcad0825227cc9a95154bad83239357094c36708001f7", size = 441948, upload-time = "2025-05-22T18:15:20.862Z" }, + { url = "https://files.pythonhosted.org/packages/15/9a/557406b62cffa395d18772e0cdcf03bed2fff03b374677348eef9f6a3792/tornado-6.5.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9e9ca370f717997cb85606d074b0e5b247282cf5e2e1611568b8821afe0342d6", size = 440112, upload-time = "2025-05-22T18:15:22.591Z" }, + { url = "https://files.pythonhosted.org/packages/55/82/7721b7319013a3cf881f4dffa4f60ceff07b31b394e459984e7a36dc99ec/tornado-6.5.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b77e9dfa7ed69754a54c89d82ef746398be82f749df69c4d3abe75c4d1ff4888", size = 443672, upload-time = "2025-05-22T18:15:24.027Z" }, + { url = "https://files.pythonhosted.org/packages/7d/42/d11c4376e7d101171b94e03cef0cbce43e823ed6567ceda571f54cf6e3ce/tornado-6.5.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253b76040ee3bab8bcf7ba9feb136436a3787208717a1fb9f2c16b744fba7331", size = 443019, upload-time = "2025-05-22T18:15:25.735Z" }, + { url = "https://files.pythonhosted.org/packages/7d/f7/0c48ba992d875521ac761e6e04b0a1750f8150ae42ea26df1852d6a98942/tornado-6.5.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:308473f4cc5a76227157cdf904de33ac268af770b2c5f05ca6c1161d82fdd95e", size = 443252, upload-time = "2025-05-22T18:15:27.499Z" }, + { url = "https://files.pythonhosted.org/packages/89/46/d8d7413d11987e316df4ad42e16023cd62666a3c0dfa1518ffa30b8df06c/tornado-6.5.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:caec6314ce8a81cf69bd89909f4b633b9f523834dc1a352021775d45e51d9401", size = 443930, upload-time = "2025-05-22T18:15:29.299Z" }, + { url = 
"https://files.pythonhosted.org/packages/78/b2/f8049221c96a06df89bed68260e8ca94beca5ea532ffc63b1175ad31f9cc/tornado-6.5.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:13ce6e3396c24e2808774741331638ee6c2f50b114b97a55c5b442df65fd9692", size = 443351, upload-time = "2025-05-22T18:15:31.038Z" }, + { url = "https://files.pythonhosted.org/packages/76/ff/6a0079e65b326cc222a54720a748e04a4db246870c4da54ece4577bfa702/tornado-6.5.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5cae6145f4cdf5ab24744526cc0f55a17d76f02c98f4cff9daa08ae9a217448a", size = 443328, upload-time = "2025-05-22T18:15:32.426Z" }, + { url = "https://files.pythonhosted.org/packages/49/18/e3f902a1d21f14035b5bc6246a8c0f51e0eef562ace3a2cea403c1fb7021/tornado-6.5.1-cp39-abi3-win32.whl", hash = "sha256:e0a36e1bc684dca10b1aa75a31df8bdfed656831489bc1e6a6ebed05dc1ec365", size = 444396, upload-time = "2025-05-22T18:15:34.205Z" }, + { url = "https://files.pythonhosted.org/packages/7b/09/6526e32bf1049ee7de3bebba81572673b19a2a8541f795d887e92af1a8bc/tornado-6.5.1-cp39-abi3-win_amd64.whl", hash = "sha256:908e7d64567cecd4c2b458075589a775063453aeb1d2a1853eedb806922f568b", size = 444840, upload-time = "2025-05-22T18:15:36.1Z" }, + { url = "https://files.pythonhosted.org/packages/55/a7/535c44c7bea4578e48281d83c615219f3ab19e6abc67625ef637c73987be/tornado-6.5.1-cp39-abi3-win_arm64.whl", hash = "sha256:02420a0eb7bf617257b9935e2b754d1b63897525d8a289c9d65690d580b4dcf7", size = 443596, upload-time = "2025-05-22T18:15:37.433Z" }, +] + +[[package]] +name = "trailrunner" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pathspec" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4d/93/630e10bacd897daeb9ff5a408f4e7cb0fc2f243e7e3ef00f9e6cf319b11c/trailrunner-1.4.0.tar.gz", hash = "sha256:3fe61e259e6b2e5192f321c265985b7a0dc18497ced62b2da244f08104978398", size = 15836, upload-time = "2023-03-27T07:54:35.515Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b1/29/21001afea86bac5016c3940b43de3ce4786b0d8337d4ea79bb903c649ce3/trailrunner-1.4.0-py3-none-any.whl", hash = "sha256:a286d39f2723f28d167347f41cf8f232832648709366e722f55cf5545772a48e", size = 11071, upload-time = "2023-03-27T07:54:32.514Z" }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, +] + +[[package]] +name = "typeguard" +version = "2.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/38/c61bfcf62a7b572b5e9363a802ff92559cb427ee963048e1442e3aef7490/typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4", size = 40604, upload-time = "2021-12-10T21:09:39.158Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/bb/d43e5c75054e53efce310e79d63df0ac3f25e34c926be5dffb7d283fb2a8/typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1", size = 17605, upload-time = "2021-12-10T21:09:37.844Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20250516" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/88/d65ed807393285204ab6e2801e5d11fbbea811adcaa979a2ed3b67a5ef41/types_python_dateutil-2.9.0.20250516.tar.gz", 
hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5", size = 13943, upload-time = "2025-05-16T03:06:58.385Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/3f/b0e8db149896005adc938a1e7f371d6d7e9eca4053a29b108978ed15e0c2/types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93", size = 14356, upload-time = "2025-05-16T03:06:57.249Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" }, 
+] + +[[package]] +name = "ufmt" +version = "2.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "black" }, + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "libcst" }, + { name = "moreorless" }, + { name = "tomlkit" }, + { name = "trailrunner" }, + { name = "typing-extensions" }, + { name = "usort" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/f8/c25e242a8e12062172dea4117859757a11339bbc39b1a3c7fb6a6de03bb2/ufmt-2.8.0.tar.gz", hash = "sha256:72c9502915497678de9aeab8aa18604890f14f869f7f378dd26e2878bde84f13", size = 24482, upload-time = "2024-10-25T06:21:57.239Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/4b/3f1b6f566b6cf70ccc5cba9a638fe4459f1e373c34d74df2e40e41871d70/ufmt-2.8.0-py3-none-any.whl", hash = "sha256:47a690811c576ebd3a0e30d77d43b65c84240e5c1611e5cb4a880bdd7f4507c1", size = 28268, upload-time = "2024-10-25T06:21:55.822Z" }, +] + +[[package]] +name = "uri-template" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/31/c7/0336f2bd0bcbada6ccef7aaa25e443c118a704f828a0620c6fa0207c1b64/uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7", size = 21678, upload-time = "2023-06-21T01:49:05.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/00/3fca040d7cf8a32776d3d81a00c8ee7457e00f80c649f1e4a863c8321ae9/uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363", size = 11140, upload-time = "2023-06-21T01:49:03.467Z" }, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, +] + +[[package]] +name = "usort" +version = "1.0.8.post1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "libcst" }, + { name = "moreorless" }, + { name = "stdlibs" }, + { name = "toml" }, + { name = "trailrunner" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9b/f4/3ef48b43f2645f2cb4a37d6007e611bc669af44eecfee953c5dd57433011/usort-1.0.8.post1.tar.gz", hash = "sha256:68def75f2b20b97390c552c503e071ee06c65ad502c5f94f3bd03f095cf4dfe6", size = 83215, upload-time = "2024-02-12T04:29:33.632Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/55/cc51ceb3d93763b9d28def24615bc485212525550967ce9e992a455f9ab5/usort-1.0.8.post1-py3-none-any.whl", hash = "sha256:6c57cdf17b458c79f8a61eb3ce8bf3f93e36d3c2edd602b9b2aa16b6875d3255", size = 37281, upload-time = "2024-02-12T04:29:31.693Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = 
"sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, +] + +[[package]] +name = "webcolors" +version = "24.11.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7b/29/061ec845fb58521848f3739e466efd8250b4b7b98c1b6c5bf4d40b419b7e/webcolors-24.11.1.tar.gz", hash = "sha256:ecb3d768f32202af770477b8b65f318fa4f566c22948673a977b00d589dd80f6", size = 45064, upload-time = "2024-11-11T07:43:24.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/e8/c0e05e4684d13459f93d312077a9a2efbe04d59c393bc2b8802248c908d4/webcolors-24.11.1-py3-none-any.whl", hash = "sha256:515291393b4cdf0eb19c155749a096f779f7d909f7cceea072791cb9095b92e9", size = 14934, upload-time = "2024-11-11T07:43:22.529Z" }, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" }, +] + +[[package]] +name = "websocket-client" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/e6/30/fba0d96b4b5fbf5948ed3f4681f7da2f9f64512e1d303f94b4cc174c24a5/websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da", size = 54648, upload-time = "2024-04-23T22:16:16.976Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/44687a29792a70e111c5c477230a72c4b957d88d16141199bf9acb7537a3/websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526", size = 58826, upload-time = "2024-04-23T22:16:14.422Z" }, +] + +[[package]] +name = "widgetsnbextension" +version = "4.0.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/53/2e0253c5efd69c9656b1843892052a31c36d37ad42812b5da45c62191f7e/widgetsnbextension-4.0.14.tar.gz", hash = "sha256:a3629b04e3edb893212df862038c7232f62973373869db5084aed739b437b5af", size = 1097428, upload-time = "2025-04-10T13:01:25.628Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/51/5447876806d1088a0f8f71e16542bf350918128d0a69437df26047c8e46f/widgetsnbextension-4.0.14-py3-none-any.whl", hash = "sha256:4875a9eaf72fbf5079dc372a51a9f268fc38d46f767cbf85c43a36da5cb9b575", size = 2196503, upload-time = "2025-04-10T13:01:23.086Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = 
"2025-06-08T17:06:38.034Z" }, +] diff --git a/zizmor.yml b/zizmor.yml new file mode 100644 index 00000000..faf5a0f0 --- /dev/null +++ b/zizmor.yml @@ -0,0 +1,5 @@ +rules: + unpinned-uses: + config: + policies: + "*": ref-pin \ No newline at end of file