# assistance-engine/Docker/requirements.txt
# This file was autogenerated by uv via the following command:
# uv export --format requirements-txt --no-hashes --no-dev -o Docker/requirements.txt
accelerate==1.12.0
# via assistance-engine
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.13.3
# via langchain-community
aiosignal==1.4.0
# via aiohttp
annotated-types==0.7.0
# via pydantic
anyio==4.12.1
# via httpx
attrs==25.4.0
# via aiohttp
boto3==1.42.58
# via langchain-aws
botocore==1.42.58
# via
# boto3
# s3transfer
certifi==2026.1.4
# via
# elastic-transport
# httpcore
# httpx
# requests
charset-normalizer==3.4.4
# via requests
click==8.3.1
# via nltk
colorama==0.4.6 ; sys_platform == 'win32'
# via
# click
# loguru
# tqdm
cuda-bindings==12.9.4 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
cuda-pathfinder==1.3.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via cuda-bindings
dataclasses-json==0.6.7
# via langchain-community
elastic-transport==8.17.1
# via elasticsearch
elasticsearch==8.19.3
# via langchain-elasticsearch
filelock==3.24.3
# via
# huggingface-hub
# torch
frozenlist==1.8.0
# via
# aiohttp
# aiosignal
fsspec==2025.10.0
# via
# huggingface-hub
# torch
greenlet==3.3.2 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'
# via sqlalchemy
grpcio==1.78.1
# via
# assistance-engine
# grpcio-reflection
# grpcio-tools
grpcio-reflection==1.78.1
# via assistance-engine
grpcio-tools==1.78.1
# via assistance-engine
h11==0.16.0
# via httpcore
hf-xet==1.3.0 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
# via huggingface-hub
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
# langgraph-sdk
# langsmith
# ollama
httpx-sse==0.4.3
# via langchain-community
huggingface-hub==0.36.2
# via
# accelerate
# langchain-huggingface
# tokenizers
idna==3.11
# via
# anyio
# httpx
# requests
# yarl
jinja2==3.1.6
# via torch
jmespath==1.1.0
# via
# boto3
# botocore
joblib==1.5.3
# via nltk
jsonpatch==1.33
# via langchain-core
jsonpointer==3.0.0
# via jsonpatch
langchain==1.2.10
# via assistance-engine
langchain-aws==1.3.1
# via assistance-engine
langchain-classic==1.0.1
# via langchain-community
langchain-community==0.4.1
# via assistance-engine
langchain-core==1.2.15
# via
# langchain
# langchain-aws
# langchain-classic
# langchain-community
# langchain-elasticsearch
# langchain-huggingface
# langchain-ollama
# langchain-text-splitters
# langgraph
# langgraph-checkpoint
# langgraph-prebuilt
langchain-elasticsearch==1.0.0
# via assistance-engine
langchain-huggingface==1.2.0
# via assistance-engine
langchain-ollama==1.0.1
# via assistance-engine
langchain-text-splitters==1.1.1
# via langchain-classic
langgraph==1.0.9
# via langchain
langgraph-checkpoint==4.0.0
# via
# langgraph
# langgraph-prebuilt
langgraph-prebuilt==1.0.8
# via langgraph
langgraph-sdk==0.3.8
# via langgraph
langsmith==0.7.6
# via
# langchain-classic
# langchain-community
# langchain-core
loguru==0.7.3
# via assistance-engine
markupsafe==3.0.3
# via jinja2
marshmallow==3.26.2
# via dataclasses-json
mpmath==1.3.0
# via sympy
multidict==6.7.1
# via
# aiohttp
# yarl
mypy-extensions==1.1.0
# via typing-inspect
networkx==3.6.1
# via torch
nltk==3.9.3
# via assistance-engine
numpy==2.4.2
# via
# accelerate
# assistance-engine
# elasticsearch
# langchain-aws
# langchain-community
# pandas
nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via
# nvidia-cusolver-cu12
# torch
nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-nccl-cu12==2.27.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
# torch
nvidia-nvshmem-cu12==3.4.5 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
ollama==0.6.1
# via langchain-ollama
orjson==3.11.7
# via
# langgraph-sdk
# langsmith
ormsgpack==1.12.2
# via langgraph-checkpoint
packaging==24.2
# via
# accelerate
# huggingface-hub
# langchain-core
# langsmith
# marshmallow
pandas==3.0.1
# via assistance-engine
propcache==0.4.1
# via
# aiohttp
# yarl
protobuf==6.33.5
# via
# grpcio-reflection
# grpcio-tools
psutil==7.2.2
# via accelerate
pydantic==2.12.5
# via
# langchain
# langchain-aws
# langchain-classic
# langchain-core
# langgraph
# langsmith
# ollama
# pydantic-settings
pydantic-core==2.41.5
# via pydantic
pydantic-settings==2.13.1
# via langchain-community
python-dateutil==2.9.0.post0
# via
# botocore
# elasticsearch
# pandas
python-dotenv==1.2.1
# via
# assistance-engine
# pydantic-settings
pyyaml==6.0.3
# via
# accelerate
# huggingface-hub
# langchain-classic
# langchain-community
# langchain-core
rapidfuzz==3.14.3
# via assistance-engine
regex==2026.2.19
# via nltk
requests==2.32.5
# via
# huggingface-hub
# langchain-classic
# langchain-community
# langsmith
# requests-toolbelt
requests-toolbelt==1.0.0
# via langsmith
s3transfer==0.16.0
# via boto3
safetensors==0.7.0
# via accelerate
setuptools==82.0.0
# via
# grpcio-tools
# torch
simsimd==6.5.13
# via elasticsearch
six==1.17.0
# via python-dateutil
sqlalchemy==2.0.46
# via
# langchain-classic
# langchain-community
sympy==1.14.0
# via torch
tenacity==9.1.4
# via
# langchain-community
# langchain-core
tokenizers==0.22.2
# via langchain-huggingface
torch==2.10.0
# via accelerate
tqdm==4.67.3
# via
# assistance-engine
# huggingface-hub
# nltk
triton==3.6.0 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
typing-extensions==4.15.0
# via
# aiosignal
# anyio
# elasticsearch
# grpcio
# huggingface-hub
# langchain-core
# pydantic
# pydantic-core
# sqlalchemy
# torch
# typing-inspect
# typing-inspection
typing-inspect==0.9.0
# via dataclasses-json
typing-inspection==0.4.2
# via
# pydantic
# pydantic-settings
tzdata==2025.3 ; sys_platform == 'emscripten' or sys_platform == 'win32'
# via pandas
urllib3==2.6.3
# via
# botocore
# elastic-transport
# requests
uuid-utils==0.14.1
# via
# langchain-core
# langsmith
win32-setctime==1.2.0 ; sys_platform == 'win32'
# via loguru
xxhash==3.6.0
# via
# langgraph
# langsmith
yarl==1.22.0
# via aiohttp
zstandard==0.25.0
# via langsmith