uv/ecosystem/transformers/pyproject.toml
Charlie Marsh 6e05d37a1d
Remove av pin in transformers (#13518)
## Summary

This test started failing, and it fails at least as far back as v0.6, so I
don't think it's on our end. I'm wondering if all the wheels here were
yanked? They're listed in the lockfile, but not on PyPI:
https://pypi.org/project/av/9.2.0/#files. So to get this passing, let's
just unpin it.

Edit: Ahh, ok. It looks like the project ran out of space, so they
removed wheels for all the older versions:
https://github.com/PyAV-Org/PyAV/issues/1879.
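
For reference, a minimal sketch of the change, assuming the prior pin was `av==9.2.0` (the release linked above); the fix is simply dropping the version specifier from the `video` extra:

```toml
[project.optional-dependencies]
# Before (assumed pin): the 9.2.0 wheels are gone from PyPI, so resolution fails.
# video = ["decord==0.6.0", "av==9.2.0"]
# After: leave `av` unpinned so the resolver can pick a release that still has wheels.
video = ["decord==0.6.0", "av"]
```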
2025-05-18 19:59:34 -04:00

[project]
name = "transformers"
version = "4.39.0.dev0"
authors = [
{ name = "The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", email = "transformers@huggingface.co" }
]
description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
keywords = ["NLP", "vision", "speech", "deep", "learning", "transformer", "pytorch", "tensorflow", "jax", "BERT", "GPT-2", "Wav2Vec2", "ViT"]
requires-python = ">=3.9.0"
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
]
dependencies = [
"filelock",
"huggingface-hub>=0.19.3,<1.0",
"numpy>=1.17",
"packaging>=20.0",
"pyyaml>=5.1",
"regex!=2019.12.17",
"requests",
"tokenizers>=0.14,<0.19",
"safetensors>=0.4.1",
"tqdm>=4.27",
]
[project.urls]
repository = "https://github.com/huggingface/transformers"
[project.optional-dependencies]
ja = [
"fugashi>=1.0",
"ipadic>=1.0.0,<2.0",
"unidic_lite>=1.0.7",
"unidic>=1.0.2",
"sudachipy>=0.6.6",
"sudachidict_core>=20220729",
"rhoknp>=1.1.0,<1.3.1"
]
sklearn = ["scikit-learn"]
tf = [
"tensorflow>=2.6,<2.16",
"onnxconverter-common",
"tf2onnx",
"tensorflow-text<2.16",
"keras-nlp>=0.3.1"
]
tf-cpu = [
"tensorflow-cpu>=2.6,<2.16",
"onnxconverter-common",
"tf2onnx",
"tensorflow-text<2.16",
"keras-nlp>=0.3.1"
]
torch = ["torch", "accelerate>=0.21.0"]
accelerate = ["accelerate>=0.21.0"]
retrieval = ["faiss-cpu", "datasets!=2.5.0"]
flax = [
"jax>=0.4.1,<=0.4.13",
"jaxlib>=0.4.1,<=0.4.13",
"flax>=0.4.1,<=0.7.0",
"optax>=0.0.8,<=0.1.4"
]
tokenizers = ["tokenizers>=0.14,<0.19"]
ftfy = ["ftfy"]
onnxruntime = ["onnxruntime>=1.4.0", "onnxruntime-tools>=1.4.2"]
onnx = [
"onnxconverter-common",
"tf2onnx",
"onnxruntime>=1.4.0",
"onnxruntime-tools>=1.4.2"
]
modelcreation = ["cookiecutter==1.7.3"]
sagemaker = ["sagemaker==2.226.1"] # sagemaker>=2.31.0 # TODO(konsti)
deepspeed = ["deepspeed>=0.9.3", "accelerate>=0.21.0"]
optuna = ["optuna"]
ray = ["ray[tune]>=2.7.0"]
sigopt = ["sigopt"]
integrations = ["optuna", "ray[tune]>=2.7.0", "sigopt"]
serving = ["pydantic", "uvicorn", "fastapi", "starlette"]
audio = ["librosa", "pyctcdecode>=0.4.0", "phonemizer", "kenlm"]
speech = [
"torchaudio",
"librosa",
"pyctcdecode>=0.4.0",
"phonemizer",
"kenlm"
]
torch-speech = [
"torchaudio",
"librosa",
"pyctcdecode>=0.4.0",
"phonemizer",
"kenlm"
]
tf-speech = ["librosa", "pyctcdecode>=0.4.0", "phonemizer", "kenlm"]
flax-speech = ["librosa", "pyctcdecode>=0.4.0", "phonemizer", "kenlm"]
vision = ["Pillow>=10.0.1,<=15.0"]
timm = ["timm"]
torch-vision = ["torchvision", "Pillow>=10.0.1,<=15.0"]
# natten = ["natten>=0.14.6,<0.15.0"] # TODO(konsti)
codecarbon = ["codecarbon==1.2.0"]
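# `av` is intentionally left unpinned: wheels for older PyAV releases (e.g. 9.2.0)
# were removed from PyPI (https://github.com/PyAV-Org/PyAV/issues/1879), so a pin
# on one of those versions no longer resolves.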
video = ["decord==0.6.0", "av"]
sentencepiece = ["sentencepiece>=0.1.91,!=0.1.92", "protobuf"]
deepspeed-testing = [
"deepspeed>=0.9.3",
"accelerate>=0.21.0",
"pytest>=7.2.0,<8.0.0",
"pytest-xdist",
"timeout-decorator",
"parameterized",
"psutil",
"datasets!=2.5.0",
"dill<0.3.5",
"evaluate>=0.2.0",
"pytest-timeout",
"ruff==0.1.5",
"sacrebleu>=1.4.12,<2.0.0",
"rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"nltk",
"GitPython<3.1.19",
"hf-doc-builder>=0.3.0",
"protobuf",
"sacremoses",
"rjieba",
"beautifulsoup4",
"tensorboard",
"pydantic",
"faiss-cpu",
"datasets!=2.5.0",
"cookiecutter==1.7.3",
"optuna",
"sentencepiece>=0.1.91,!=0.1.92",
"protobuf"
]
quality = [
"datasets!=2.5.0",
"isort>=5.5.4",
"ruff==0.1.5",
"GitPython<3.1.19",
"hf-doc-builder>=0.3.0",
"urllib3<2.0.0"
]
all = [
"tensorflow>=2.6,<2.16",
"onnxconverter-common",
"tf2onnx",
"tensorflow-text<2.16",
"keras-nlp>=0.3.1",
"torch",
"accelerate>=0.21.0",
"jax>=0.4.1,<=0.4.13",
"jaxlib>=0.4.1,<=0.4.13",
"flax>=0.4.1,<=0.7.0",
"optax>=0.0.8,<=0.1.4",
"sentencepiece>=0.1.91,!=0.1.92",
"protobuf",
"tokenizers>=0.14,<0.19",
"torchaudio",
"librosa",
"pyctcdecode>=0.4.0",
"phonemizer",
"kenlm",
"Pillow>=10.0.1,<=15.0",
"optuna",
"ray[tune]>=2.7.0",
"sigopt",
"timm",
"torchvision",
"Pillow>=10.0.1,<=15.0",
"codecarbon==1.2.0",
"accelerate>=0.21.0",
"decord==0.6.0",
"av"
]
docs_specific = ["hf-doc-builder"]
docs = [
"tensorflow>=2.6,<2.16",
"onnxconverter-common",
"tf2onnx",
"tensorflow-text<2.16",
"keras-nlp>=0.3.1",
"torch",
"accelerate>=0.21.0",
"jax>=0.4.1,<=0.4.13",
"jaxlib>=0.4.1,<=0.4.13",
"flax>=0.4.1,<=0.7.0",
"optax>=0.0.8,<=0.1.4",
"sentencepiece>=0.1.91,!=0.1.92",
"protobuf",
"tokenizers>=0.14,<0.19",
"torchaudio",
"librosa",
"pyctcdecode>=0.4.0",
"phonemizer",
"kenlm",
"Pillow>=10.0.1,<=15.0",
"optuna",
"ray[tune]>=2.7.0",
"sigopt",
"timm",
"torchvision",
"Pillow>=10.0.1,<=15.0",
"codecarbon==1.2.0",
"accelerate>=0.21.0",
"decord==0.6.0",
"av",
"hf-doc-builder"
]
torchhub = [
"filelock",
"huggingface-hub>=0.19.3,<1.0",
"importlib_metadata",
"numpy>=1.17",
"packaging>=20.0",
"protobuf",
"regex!=2019.12.17",
"requests",
"sentencepiece>=0.1.91,!=0.1.92",
"torch",
"tokenizers>=0.14,<0.19",
"tqdm>=4.27"
]
agents = [
"diffusers",
"accelerate>=0.21.0",
"datasets!=2.5.0",
"torch",
"sentencepiece>=0.1.91,!=0.1.92",
"opencv-python",
"Pillow>=10.0.1,<=15.0"
]
[dependency-groups]
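# PEP 735 dependency groups; with uv, `uv sync` installs the `dev` group by default.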
dev = [
"GitPython<3.1.19",
"Pillow>=10.0.1,<=15.0",
"accelerate>=0.21.0",
"av",
"beautifulsoup4",
"codecarbon==1.2.0",
"codecarbon==1.2.0",
"cookiecutter==1.7.3",
"datasets!=2.5.0",
"decord==0.6.0",
"dill<0.3.5",
"evaluate>=0.2.0",
"faiss-cpu",
"flax>=0.4.1,<=0.7.0",
"fugashi>=1.0",
"hf-doc-builder",
"hf-doc-builder>=0.3.0",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.4.1,<=0.4.13",
"jaxlib>=0.4.1,<=0.4.13",
"kenlm",
"keras-nlp>=0.3.1",
"librosa",
"nltk",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"optax>=0.0.8,<=0.1.4",
"optuna",
"parameterized",
"phonemizer",
"protobuf",
"psutil",
"pyctcdecode>=0.4.0",
"pydantic",
"pytest-timeout",
"pytest-xdist",
"pytest>=7.2.0,<8.0.0",
"ray[tune]>=2.7.0",
"rhoknp>=1.1.0,<1.3.1",
"rjieba",
"rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff==0.1.5",
"sacrebleu>=1.4.12,<2.0.0",
"sacremoses",
"scikit-learn",
"sentencepiece>=0.1.91,!=0.1.92",
"sigopt",
"sudachidict_core>=20220729",
"sudachipy>=0.6.6",
"tensorboard",
"tensorflow-text<2.16",
"tensorflow>=2.6,<2.16",
"tf2onnx",
"timeout-decorator",
"timm",
"tokenizers>=0.14,<0.19",
"torch",
"torchaudio",
"torchvision",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"urllib3<2.0.0"
]
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.ruff]
# Never enforce `E501` (line length violations).
ignore = ["C901", "E501", "E741", "F402", "F823"]
select = ["C", "E", "F", "I", "W"]
line-length = 119
# Ignore import violations in all `__init__.py` files.
[tool.ruff.per-file-ignores]
"__init__.py" = ["E402", "F401", "F403", "F811"]
"src/transformers/file_utils.py" = ["F401"]
"src/transformers/utils/dummy_*.py" = ["F401"]
[tool.ruff.isort]
lines-after-imports = 2
known-first-party = ["transformers"]
[tool.ruff.format]
# Like Black, use double quotes for strings.
quote-style = "double"
# Like Black, indent with spaces, rather than tabs.
indent-style = "space"
# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false
# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"
[tool.pytest.ini_options]
doctest_optionflags = "NUMBER NORMALIZE_WHITESPACE ELLIPSIS"
doctest_glob = "**/*.md"
markers = [
"flash_attn_test: marks tests related to flash attention (deselect with '-m \"not flash_attn_test\"')",
"bitsandbytes: select (or deselect with `not`) bitsandbytes integration tests",
]