commit fa15fc977b48a7dbfb64e6dc559509d2ba4c3259 Author: Quentin Torroba Date: Tue Dec 9 13:13:22 2025 +0100 Initial commit Co-Authored-By: Quentin Torroba Co-Authored-By: Laure Hugo Co-Authored-By: Benjamin Trom Co-Authored-By: Mathias Gesbert Co-Authored-By: Michel Thomazo Co-Authored-By: Clément Drouin Co-Authored-By: Vincent Guilloux Co-Authored-By: Valentin Berard Co-Authored-By: Mistral Vibe diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..70fb281 --- /dev/null +++ b/.envrc @@ -0,0 +1,12 @@ +# shellcheck shell=bash +if ! has nix_direnv_version || ! nix_direnv_version 3.1.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.1.0/direnvrc" "sha256-yMJ2OVMzrFaDPn7q8nCBZFRYpL/f0RcHzhmw/i6btJM=" +fi + +if command -v nix &>/dev/null; then + use flake +fi + +if command -v pre-commit &>/dev/null; then + pre-commit install +fi diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..c88eb1e --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,7 @@ +# CODEOWNERS + +# Default owners for everything in the repo +* @mistralai/mistral-vibe + +# Owners for specific directories +# Not needed for now, can be filled later diff --git a/.github/workflows/build-and-upload.yml b/.github/workflows/build-and-upload.yml new file mode 100644 index 0000000..8a281ab --- /dev/null +++ b/.github/workflows/build-and-upload.yml @@ -0,0 +1,86 @@ +name: Build and upload + +on: + workflow_dispatch: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + build-and-upload: + name: ${{ matrix.os }}-${{ matrix.arch }} + strategy: + matrix: + include: + # Linux + - runner: ubuntu-22.04 + os: linux + arch: x86_64 + # - runner: ubuntu-22.04-arm # ubuntu-22.04-arm, ubuntu-24.04-arm and windows-11-arm are not supported yet for private repositories + # os: linux + # arch: aarch64 + + # macOS + - runner: macos-15-intel + os: darwin + arch: x86_64 + - runner: macos-14 + os: darwin + arch: aarch64 + + # Windows + - 
runner: windows-2022 + os: windows + arch: x86_64 + # - runner: windows-11-arm # ubuntu-22.04-arm, ubuntu-24.04-arm and windows-11-arm are not supported yet for private repositories + # os: windows + # arch: aarch64 + runs-on: ${{ matrix.runner }} + + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + + - name: Install uv with caching + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + with: + version: "latest" + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: "3.12" + + - name: Sync dependencies + run: uv sync --all-extras + + - name: Build with PyInstaller + run: uv run pyinstaller vibe-acp.spec + + - name: Get package version with uv (Unix) + id: get_version_unix + if: ${{ matrix.os != 'windows' }} + run: python -c "import subprocess; version = subprocess.check_output(['uv', 'version']).decode().split()[1]; print(f'version={version}')" >> $GITHUB_OUTPUT + + - name: Get package version with uv (Windows) + id: get_version_windows + if: ${{ matrix.os == 'windows' }} + shell: pwsh + run: python -c "import subprocess; version = subprocess.check_output(['uv', 'version']).decode().split()[1]; print(f'version={version}')" >> $env:GITHUB_OUTPUT + + - name: Upload binary as artifact (Unix) + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + if: ${{ matrix.os != 'windows' }} + with: + name: vibe-acp-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.get_version_unix.outputs.version }} + path: dist/vibe-acp + + - name: Upload binary as artifact (Windows) + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + if: ${{ matrix.os == 'windows' }} + with: + name: vibe-acp-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.get_version_windows.outputs.version }} + path: dist/vibe-acp.exe diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml new file mode 100644 index 0000000..5a57431 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,126 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + PYTHON_VERSION: "3.12" + +jobs: + pre-commit: + name: Pre-commit + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + + - name: Install uv with caching + uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7 + with: + version: "latest" + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Sync dependencies + run: uv sync --all-extras + + - name: Install pip (required by pre-commit) + run: uv pip install pip + + - name: Add virtual environment to PATH + run: echo "$(pwd)/.venv/bin" >> $GITHUB_PATH + + - name: Cache pre-commit + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + path: ~/.cache/pre-commit + key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} + + - name: Run pre-commit + run: uv run pre-commit run --all-files --show-diff-on-failure + + tests: + name: Tests + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + + - name: Install uv with caching + uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7 + with: + version: "latest" + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Sync dependencies + run: uv sync --all-extras + + - name: Verify CLI can start + run: | + uv run vibe --help + uv run vibe-acp --help + + - name: Install ripgrep + run: sudo apt-get update && sudo 
apt-get install -y ripgrep + + - name: Run tests + run: uv run pytest --ignore tests/snapshots + + snapshot-tests: + name: Snapshot Tests + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + + - name: Install uv with caching + uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7 + with: + version: "latest" + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Sync dependencies + run: uv sync --all-extras + + - name: Run snapshot tests + id: snapshot-tests + run: uv run pytest tests/snapshots + continue-on-error: true + + - name: Upload snapshot report + if: steps.snapshot-tests.outcome == 'failure' + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: snapshot-report + path: snapshot_report.html + if-no-files-found: warn + retention-days: 3 + + - name: Fail job if snapshot tests failed + if: steps.snapshot-tests.outcome == 'failure' + run: | + echo "Snapshot tests failed, failing job." 
+ exit 1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..e4dc7c2 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,44 @@ +name: Release to PyPI + +on: + release: + types: [published] + workflow_dispatch: + +jobs: + release: + runs-on: ubuntu-latest + environment: + name: pypi + url: https://pypi.org/p/mistral-vibe + permissions: + id-token: write + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: "3.12" + + - name: Install uv + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + + - name: Install dependencies + run: uv sync --locked --dev + + - name: Build package + run: uv build + + - name: Upload artifacts + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: dist + path: dist/ + + - name: Publish distribution to PyPI + if: github.repository == 'mistralai/mistral-vibe' + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f32ab67 --- /dev/null +++ b/.gitignore @@ -0,0 +1,201 @@ +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ 
+wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.*cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. 
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + +# nix / direnv +.direnv/ +result +result-* + +# Vibe runtime/session files; keep tools as part of repo when needed +.vibe/* + +# Tests run the agent in the playground, we don't need to keep the session files +tests/playground/* +. 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..3e7c8e0 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,34 @@ +--- +repos: + - repo: https://github.com/mpalmer/action-validator + rev: v0.7.0 + hooks: + - id: action-validator + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-toml + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + exclude: tests/snapshots/.*\.svg$ + + - repo: https://github.com/fsouza/mirrors-pyright + rev: v1.1.407 + hooks: + - id: pyright + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.14.5 + hooks: + - id: ruff-check + args: [--fix, --unsafe-fixes] + - id: ruff-format + args: [--check] + + - repo: https://github.com/crate-ci/typos + rev: v1.34.0 + hooks: + - id: typos + args: [--write-changes] diff --git a/.python-version b/.python-version new file mode 100644 index 0000000..e4fba21 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 0000000..4799d36 --- /dev/null +++ b/.typos.toml @@ -0,0 +1,2 @@ +[default] +extend-ignore-re = ["(?m)^.*(#|//)\\s*typos:disable-line$", "datas"] diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..49f614c --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["ms-python.python", "charliermarsh.ruff"] +} diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..a63df2a --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,59 @@ +{ + "version": "0.1.0", + "configurations": [ + { + "name": "ACP Server", + "type": "debugpy", + "request": "launch", + "program": "vibe/acp/entrypoint.py", + "args": ["--workdir", "${workspaceFolder}"], + "console": "integratedTerminal", + "justMyCode": false + }, + { + "name": "Tests", + "type": "debugpy", + "request": "launch", + "module": "pytest", + "args": ["-v", "-s"], 
+ "console": "integratedTerminal", + "justMyCode": false, + "cwd": "${workspaceFolder}", + "env": { + "PYTHONPATH": "${workspaceFolder}" + } + }, + { + "name": "Single Test", + "type": "debugpy", + "request": "launch", + "module": "pytest", + "args": ["-k", "${input:test_identifier}", "-vvv", "-s", "--no-header"], + "console": "integratedTerminal", + "justMyCode": false, + "cwd": "${workspaceFolder}", + "env": { + "PYTHONPATH": "${workspaceFolder}" + }, + "stopOnEntry": false, + "subProcess": true + }, + { + "name": "CLI", + "type": "debugpy", + "request": "launch", + "program": "vibe/cli/entrypoint.py", + "args": [], + "console": "integratedTerminal", + "justMyCode": false + } + ], + "inputs": [ + { + "id": "test_identifier", + "description": "Enter the test identifier (file, class, or function)", + "default": "TestInitialization", + "type": "promptString" + } + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..dd74058 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,26 @@ +{ + "[python]": { + "editor.codeActionsOnSave": { + "source.fixAll.ruff": "explicit", + "source.organizeImports.ruff": "explicit" + }, + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.formatOnSave": true + }, + "cursorpyright.analysis.typeCheckingMode": "strict", + "editor.formatOnSave": true, + "files.exclude": { + ".pytest_cache/**": true, + ".venv/**": true, + "**/__pycache__": true, + "dist/**": true, + "build/**": true + }, + "files.insertFinalNewline": true, + "files.trimTrailingWhitespace": true, + "python.analysis.typeCheckingMode": "strict", + "python.testing.pytestArgs": ["tests"], + "python.testing.pytestEnabled": true, + "ruff.enable": true, + "ruff.organizeImports": true +} diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..2e76438 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,135 @@ +# python312.rule +# Rule for enforcing modern Python 3.12+ best practices. 
+# Applies to all Python files (*.py) in the project. +# +# Guidelines covered: +# - Use match-case syntax instead of if/elif/else for pattern matching. +# - Use the walrus operator (:=) when it simplifies assignments and tests. +# - Favor a "never nester" approach by avoiding deep nesting with early returns or guard clauses. +# - Employ modern type hints using built-in generics (list, dict) and the union pipe (|) operator, +# rather than deprecated types from the typing module (e.g., Optional, Union, Dict, List). +# - Ensure code adheres to strong static typing practices compatible with static analyzers like pyright. +# - Favor pathlib.Path methods for file system operations over older os.path functions. +# - Write code in a declarative and minimalist style that clearly expresses its intent. +# - Additional best practices including f-string formatting, comprehensions, context managers, and overall PEP 8 compliance. + +description: "Modern Python 3.12+ best practices and style guidelines for coding." +files: "**/*.py" + +guidelines: + - title: "Match-Case Syntax" + description: > + Prefer using the match-case construct over traditional if/elif/else chains when pattern matching + is applicable. This leads to clearer, more concise, and more maintainable code. + + - title: "Walrus Operator" + description: > + Utilize the walrus operator (:=) to streamline code where assignment and conditional testing can be combined. + Use it judiciously when it improves readability and reduces redundancy. + + - title: "Never Nester" + description: > + Aim to keep code flat by avoiding deep nesting. Use early returns, guard clauses, and refactoring to + minimize nested structures, making your code more readable and maintainable. + + - title: "Modern Type Hints" + description: > + Adopt modern type hinting by using built-in generics like list and dict, along with the pipe (|) operator + for union types (e.g., int | None). 
Avoid older, deprecated constructs such as Optional, Union, Dict, and List + from the typing module. + + - title: "Strong Static Typing" + description: > + Write code with explicit and robust type annotations that are fully compatible with static type checkers + like pyright. This ensures higher code reliability and easier maintenance. + + - title: "Pydantic-First Parsing" + description: > + Prefer Pydantic v2's native validation over ad-hoc parsing. Use `model_validate`, + `field_validator`, `from_attributes`, and field aliases to coerce external SDK/DTO objects. + Avoid manual `getattr`/`hasattr` flows and custom constructors like `from_sdk` unless they are + thin wrappers over `model_validate`. Keep normalization logic inside model validators so call sites + remain declarative and typed. + + - title: "Pathlib for File Operations" + description: > + Favor the use of pathlib.Path methods for file system operations. This approach offers a more + readable, object-oriented way to handle file paths and enhances cross-platform compatibility, + reducing reliance on legacy os.path functions. + + - title: "Declarative and Minimalist Code" + description: > + Write code that is declarative—clearly expressing its intentions rather than focusing on implementation details. + Strive to keep your code minimalist by removing unnecessary complexity and boilerplate. This approach improves + readability, maintainability, and aligns with modern Python practices. + + - title: "Additional Best Practices" + description: > + Embrace other modern Python idioms such as: + - Using f-strings for string formatting. + - Favoring comprehensions for building lists and dictionaries. + - Employing context managers (with statements) for resource management. + - Following PEP 8 guidelines to maintain overall code style consistency. 
+ + - title: "Exception Documentation" + description: > + Document exceptions accurately and minimally in docstrings: + - Only document exceptions that are explicitly raised in the function implementation + - Remove Raises entries for exceptions that are not directly raised + - Include all possible exceptions from explicit raise statements + - For public APIs, document exceptions from called functions if they are allowed to propagate + - Avoid documenting built-in exceptions that are obvious (like TypeError from type hints) + This ensures documentation stays accurate and maintainable, avoiding the common pitfall + of listing every possible exception that could theoretically occur. + + - title: "Modern Enum Usage" + description: > + Leverage Python's enum module effectively following modern practices: + - Use StrEnum for string-based enums that need string comparison + - Use IntEnum/IntFlag for performance-critical integer-based enums + - Use auto() for automatic value assignment to maintain clean code + - Always use UPPERCASE for enum members to avoid name clashes + - Add methods to enums when behavior needs to be associated with values + - Use @property for computed attributes rather than storing values + - For type mixing, ensure mix-in types appear before Enum in base class sequence + - Consider Flag/IntFlag for bit field operations + - Use _generate_next_value_ for custom value generation + - Implement __bool__ when enum boolean evaluation should depend on value + This promotes type-safe constants, self-documenting code, and maintainable value sets. + + - title: "No Inline Ignores" + description: > + Do not use inline suppressions like `# type: ignore[...]` or `# noqa[...]` in production code. 
+ Instead, fix types and lint warnings at the source by: + - Refining signatures with generics (TypeVar), Protocols, or precise return types + - Guarding with `isinstance` checks before attribute access + - Using `typing.cast` when control flow guarantees the type + - Extracting small helpers to create clearer, typed boundaries + If a suppression is truly unavoidable at an external boundary, prefer a narrow, well-typed wrapper + over in-line ignores, and document the rationale in code comments. + + - title: "Pydantic Discriminated Unions" + description: > + When modeling variants with a discriminated union (e.g., on a `transport` field), do not narrow a + field type in a subclass (e.g., overriding `transport: Literal['http']` with `Literal['streamable-http']`). + This violates Liskov substitution and triggers type checker errors due to invariance of class attributes. + Prefer sibling classes plus a shared mixin for common fields and helpers, and compose the union with + `Annotated[Union[...], Field(discriminator='transport')]`. + Example pattern: + - Create a base with shared non-discriminator fields (e.g., `_MCPBase`). + - Create a mixin with protocol-specific fields/methods (e.g., `_MCPHttpFields`), without a `transport`. + - Define sibling final classes per variant (e.g., `MCPHttp`, `MCPStreamableHttp`, `MCPStdio`) that set + `transport: Literal[...]` once in each final class. + - Use `match` on the discriminator to narrow types at call sites. + + - title: "Use uv for All Commands" + description: > + We use uv to manage our python environment. You should never try to run bare python commands. + Always run commands using `uv` instead of invoking `python` or `pip` directly. + For example, use `uv add package` and `uv run script.py` rather than `pip install package` or `python script.py`. + This practice helps avoid environment drift and leverages modern Python packaging best practices. 
+ Useful uv commands are: + - uv add/remove to manage dependencies + - uv sync to install dependencies declared in pyproject.toml and uv.lock + - uv run script.py to run a script within the uv environment + - uv run pytest (or any other python tool) to run the tool within the uv environment diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..5a8504c --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +- Initial release diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..dde90ba --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,168 @@ +# Contributing to Mistral Vibe + +Thank you for your interest in Mistral Vibe! We appreciate your enthusiasm and support. + +## Current Status + +**Mistral Vibe is in active development** — our team is iterating quickly and making lots of changes under the hood. Because of this pace, we may be slower than usual when reviewing PRs and issues. + +**We especially encourage**: + +- **Bug reports** – Help us uncover and squash issues +- **Feedback & ideas** – Tell us what works, what doesn't, and what could be even better +- **Documentation improvements** – Suggest clarity improvements or highlight missing pieces + +## How to Provide Feedback + +### Bug Reports + +If you encounter a bug, please open an issue with the following information: + +1. **Description**: A clear description of the bug +2. **Steps to Reproduce**: Detailed steps to reproduce the issue +3. **Expected Behavior**: What you expected to happen +4. **Actual Behavior**: What actually happened +5. **Environment**: + - Python version + - Operating system + - Vibe version +6. **Error Messages**: Any error messages or stack traces +7. 
**Configuration**: Relevant parts of your `config.toml` (redact any sensitive information) + +### Feature Requests and Feedback + +We'd love to hear your ideas! When submitting feedback or feature requests: + +1. **Clear Description**: Explain what you'd like to see or improve +2. **Use Case**: Describe your use case and why this would be valuable +3. **Alternatives**: If applicable, mention any alternatives you've considered + +## Development Setup + +This section is for developers who want to set up the repository for local development, even though we're not currently accepting contributions. + +### Prerequisites + +- Python 3.12 or higher +- [uv](https://github.com/astral-sh/uv) - Modern Python package manager + +### Setup + +1. Clone the repository: + + ```bash + git clone + cd mistral-vibe + ``` + +2. Install dependencies: + + ```bash + uv sync --all-extras + ``` + + This will install both runtime and development dependencies. + +3. (Optional) Install pre-commit hooks: + + ```bash + uv run pre-commit install + ``` + + Pre-commit hooks will automatically run checks before each commit. + +### Running Tests + +Run all tests: + +```bash +uv run pytest +``` + +Run tests with verbose output: + +```bash +uv run pytest -v +``` + +Run a specific test file: + +```bash +uv run pytest tests/test_agent_tool_call.py +``` + +### Linting and Type Checking + +#### Ruff (Linting and Formatting) + +Check for linting issues (without fixing): + +```bash +uv run ruff check . +``` + +Auto-fix linting issues: + +```bash +uv run ruff check --fix . +``` + +Format code: + +```bash +uv run ruff format . +``` + +Check formatting without modifying files (useful for CI): + +```bash +uv run ruff format --check . 
+``` + +#### Pyright (Type Checking) + +Run type checking: + +```bash +uv run pyright +``` + +#### Pre-commit Hooks + +Run all pre-commit hooks manually: + +```bash +uv run pre-commit run --all-files +``` + +The pre-commit hooks include: + +- Ruff (linting and formatting) +- Pyright (type checking) +- Typos (spell checking) +- YAML/TOML validation +- Action validator (for GitHub Actions) + +### Code Style + +- **Line length**: 88 characters (Black-compatible) +- **Type hints**: Required for all functions and methods +- **Docstrings**: Follow Google-style docstrings +- **Formatting**: Use Ruff for both linting and formatting +- **Type checking**: Use Pyright (configured in `pyproject.toml`) + +See `pyproject.toml` for detailed configuration of Ruff and Pyright. + +## Code Contributions + +While we're not accepting code contributions at the moment, we may open up contributions in the future. When that happens, we'll update this document with: + +- Pull request process +- Contribution guidelines +- Review process + +## Questions? + +If you have questions about using Mistral Vibe, please check the [README](README.md) first. For other inquiries, feel free to open a discussion or issue. + +Thank you for helping make Mistral Vibe better! 🙏 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d7e573e --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2025 Mistral AI + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..ef3b14a --- /dev/null +++ b/README.md @@ -0,0 +1,308 @@ +# Mistral Vibe + +[![PyPI Version](https://img.shields.io/pypi/v/mistral-vibe)](https://pypi.org/project/mistral-vibe) +[![Python Version](https://img.shields.io/badge/python-3.12%2B-blue)](https://www.python.org/downloads/release/python-3120/) +[![CI Status](https://github.com/mistralai/mistral-vibe/actions/workflows/ci.yml/badge.svg)](https://github.com/mistralai/mistral-vibe/actions/workflows/ci.yml) +[![License](https://img.shields.io/github/license/mistralai/mistral-vibe)](https://github.com/mistralai/mistral-vibe/blob/main/LICENSE) + +``` +██████████████████░░ +██████████████████░░ +████ ██████ ████░░ +████ ██ ████░░ +████ ████░░ +████ ██ ██ ████░░ +██ ██ ██░░ +██████████████████░░ +██████████████████░░ +``` + +**Mistral's open-source CLI coding assistant.** + +Mistral Vibe is a command-line coding assistant powered by Mistral's models. It provides a conversational interface to your codebase, allowing you to use natural language to explore, modify, and interact with your projects through a powerful set of tools. + +> [!WARNING] +> Mistral Vibe works on Windows, but we officially support and target UNIX environments. + +## Installation + +Vibe requires Python 3.12 or higher. 
+ +### One-line install (recommended) + +```bash +# On Linux and macOS +curl -LsSf https://mistral.ai/vibe/install.sh | bash +``` + +### Using uv + +```bash +uv tool install mistral-vibe +``` + +### Using pip + +```bash +pip install mistral-vibe +``` + +## Features + +- **Interactive Chat**: A conversational AI agent that understands your requests and breaks down complex tasks. +- **Powerful Toolset**: A suite of tools for file manipulation, code searching, version control, and command execution, right from the chat prompt. + - Read, write, and patch files (`read_file`, `write_file`, `search_replace`). + - Execute shell commands in a stateful terminal (`bash`). + - Recursively search code with `grep` (with `ripgrep` support). + - Manage a `todo` list to track the agent's work. +- **Project-Aware Context**: Vibe automatically scans your project's file structure and Git status to provide relevant context to the agent, improving its understanding of your codebase. +- **Advanced CLI Experience**: Built with modern libraries for a smooth and efficient workflow. + - Autocompletion for slash commands (`/`) and file paths (`@`). + - Persistent command history. + - Beautiful Themes. +- **Highly Configurable**: Customize models, providers, tool permissions, and UI preferences through a simple `config.toml` file. +- **Safety First**: Features tool execution approval. + +## Quick Start + +1. Navigate to your project's root directory: + + ```bash + cd /path/to/your/project + ``` + +2. Run Vibe: + + ```bash + vibe + ``` + +3. If this is your first time running Vibe, it will: + + - Create a default configuration file at `~/.vibe/config.toml` + - Prompt you to enter your API key if it's not already configured + - Save your API key to `~/.vibe/.env` for future use + +4. Start interacting with the agent! + + ``` + > Can you find all instances of the word "TODO" in the project? + + 🤖 The user wants to find all instances of "TODO". The `grep` tool is perfect for this. 
I will use it to search the current directory. + + > grep(pattern="TODO", path=".") + + ... (grep tool output) ... + + 🤖 I found the following "TODO" comments in your project. + ``` + +## Usage + +### Interactive Mode + +Simply run `vibe` to enter the interactive chat loop. + +- **Multi-line Input**: Press `Ctrl+J` or `Shift+Enter` for select terminals to insert a newline. +- **File Paths**: Reference files in your prompt using the `@` symbol for smart autocompletion (e.g., `> Read the file @src/agent.py`). +- **Shell Commands**: Prefix any command with `!` to execute it directly in your shell, bypassing the agent (e.g., `> !ls -l`). + +You can start Vibe with a prompt with the following command: + +```bash +vibe "Refactor the main function in cli/main.py to be more modular." +``` + +**Note**: The `--auto-approve` flag automatically approves all tool executions without prompting. In interactive mode, you can also toggle auto-approve on/off using `Shift+Tab`. + +### Programmatic Mode + +You can run Vibe non-interactively by piping input or using the `--prompt` flag. This is useful for scripting. + +```bash +vibe --prompt "Refactor the main function in cli/main.py to be more modular." +``` + +by default it will use `auto-approve` mode. + +### Slash Commands + +Use slash commands for meta-actions and configuration changes during a session. + +## Configuration + +Vibe is configured via a `config.toml` file. It looks for this file first in `./.vibe/config.toml` and then falls back to `~/.vibe/config.toml`. + +### API Key Configuration + +Vibe supports multiple ways to configure your API keys: + +1. **Interactive Setup (Recommended for first-time users)**: When you run Vibe for the first time or if your API key is missing, Vibe will prompt you to enter it. The key will be securely saved to `~/.vibe/.env` for future sessions. + +2. 
**Environment Variables**: Set your API key as an environment variable: + + ```bash + export MISTRAL_API_KEY="your_mistral_api_key" + ``` + +3. **`.env` File**: Create a `.env` file in `~/.vibe/` and add your API keys: + + ```bash + MISTRAL_API_KEY=your_mistral_api_key + ``` + + Vibe automatically loads API keys from `~/.vibe/.env` on startup. Environment variables take precedence over the `.env` file if both are set. + +**Note**: The `.env` file is specifically for API keys and other provider credentials. General Vibe configuration should be done in `config.toml`. + +### Custom System Prompts + +You can create custom system prompts to replace the default one (`prompts/core.md`). Create a markdown file in the `~/.vibe/prompts/` directory with your custom prompt content. + +To use a custom system prompt, set the `system_prompt_id` in your configuration to match the filename (without the `.md` extension): + +```toml +# Use a custom system prompt +system_prompt_id = "my_custom_prompt" +``` + +This will load the prompt from `~/.vibe/prompts/my_custom_prompt.md`. + +### Custom Agent Configurations + +You can create custom agent configurations for specific use cases (e.g., red-teaming, specialized tasks) by adding agent-specific TOML files in the `~/.vibe/agents/` directory. + +To use a custom agent, run Vibe with the `--agent` flag: + +```bash +vibe --agent my_custom_agent +``` + +Vibe will look for a file named `my_custom_agent.toml` in the agents directory and apply its configuration. 
+ +Example custom agent configuration (`~/.vibe/agents/redteam.toml`): + +```toml +# Custom agent configuration for red-teaming +active_model = "devstral-2" +system_prompt_id = "redteam" + +# Disable some tools for this agent +disabled_tools = ["search_replace", "write_file"] + +# Override tool permissions for this agent +[tools.bash] +permission = "always" + +[tools.read_file] +permission = "always" +``` + +Note: this implies that you have set up a redteam prompt named `~/.vibe/prompts/redteam.md` + +### MCP Server Configuration + +You can configure MCP (Model Context Protocol) servers to extend Vibe's capabilities. Add MCP server configurations under the `mcp_servers` section: + +```toml +# Example MCP server configurations +[[mcp_servers]] +name = "my_http_server" +transport = "http" +url = "http://localhost:8000" +headers = { "Authorization" = "Bearer my_token" } +api_key_env = "MY_API_KEY_ENV_VAR" +api_key_header = "Authorization" +api_key_format = "Bearer {token}" + +[[mcp_servers]] +name = "my_streamable_server" +transport = "streamable-http" +url = "http://localhost:8001" +headers = { "X-API-Key" = "my_api_key" } + +[[mcp_servers]] +name = "fetch_server" +transport = "stdio" +command = "uvx" +args = ["mcp-server-fetch"] +``` + +Supported transports: + +- `http`: Standard HTTP transport +- `streamable-http`: HTTP transport with streaming support +- `stdio`: Standard input/output transport (for local processes) + +Key fields: + +- `name`: A short alias for the server (used in tool names) +- `transport`: The transport type +- `url`: Base URL for HTTP transports +- `headers`: Additional HTTP headers +- `api_key_env`: Environment variable containing the API key +- `command`: Command to run for stdio transport +- `args`: Additional arguments for stdio transport + +### Enable/disable tools with patterns + +You can control which tools are active using `enabled_tools` and `disabled_tools`. +These fields support exact names, glob patterns, and regular expressions. 
+ +Examples: + +```toml +# Only enable tools that start with "serena_" (glob) +enabled_tools = ["serena_*"] + +# Regex (prefix with re:) — matches full tool name (case-insensitive) +enabled_tools = ["re:^serena_.*$"] + +# Heuristic regex support (patterns like `serena.*` are treated as regex) +enabled_tools = ["serena.*"] + +# Disable a group with glob; everything else stays enabled +disabled_tools = ["mcp_*", "grep"] +``` + +Notes: + +- MCP tool names use underscores, e.g., `serena_list` not `serena.list`. +- Regex patterns are matched against the full tool name using fullmatch. + +### Custom Vibe Home Directory + +By default, Vibe stores its configuration in `~/.vibe/`. You can override this by setting the `VIBE_HOME` environment variable: + +```bash +export VIBE_HOME="/path/to/custom/vibe/home" +``` + +This affects where Vibe looks for: + +- `config.toml` - Main configuration +- `.env` - API keys +- `agents/` - Custom agent configurations +- `prompts/` - Custom system prompts +- `tools/` - Custom tools +- `logs/` - Session logs + +## Resources + +- [CHANGELOG](CHANGELOG.md) - See what's new in each version +- [CONTRIBUTING](CONTRIBUTING.md) - Guidelines for feedback and bug reports + +## License + +Copyright 2025 Mistral AI + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the [LICENSE](LICENSE) file for the full license text. 
diff --git a/action.yml b/action.yml new file mode 100644 index 0000000..8044a4a --- /dev/null +++ b/action.yml @@ -0,0 +1,64 @@ +--- +name: Mistral Vibe +description: "Download, install, and run Mistral Vibe" +author: Mistral AI + +inputs: + prompt: + description: The prompt to pass to the agent + required: true + default: | + You are a helpful assistant + MISTRAL_API_KEY: + description: API Key for AI Studio + required: true + install_python: + description: | + Whether or not to install Python + required: true + default: "true" + python_version: + description: | + Version of Python to install. Warning: Unsupported. + required: false + +runs: + using: "composite" + steps: + - name: Install Required Python Version + if: inputs.install_python == true + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version-file: ${{ github.action_path }}/.python-version + + - name: Install Requested Python Version + if: inputs.install_python && inputs.python_version + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 + with: + python-version: ${{ inputs.python_version }} + + - name: Install uv + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + + - name: Install Mistral Vibe + shell: bash + working-directory: ${{ github.action_path }} + run: | + uv sync --locked --all-extras --dev + + - name: Run Mistral Vibe + id: run-mistral-vibe + shell: bash + working-directory: ${{ github.action_path }} + env: + MISTRAL_API_KEY: "${{ inputs.MISTRAL_API_KEY }}" + run: | + # We want to make sure that any text passed in here + # doesn't have special bash characters (<, >, &, etc...) 
+ ESCAPED_PROMPT=$(printf '%q' "${{ inputs.prompt }}") + + # Change back to the original working directory for the tool to work + cd "${{ github.workspace }}" + uv run --directory "${{ github.action_path }}" vibe \ + --auto-approve \ + -p "${ESCAPED_PROMPT}" diff --git a/distribution/zed/extension.toml b/distribution/zed/extension.toml new file mode 100644 index 0000000..0fae913 --- /dev/null +++ b/distribution/zed/extension.toml @@ -0,0 +1,35 @@ +id = "mistral-vibe" +name = "Mistral Vibe" +description = "Lightning-fast AI agent that actually gets things done" +version = "1.0.0" +schema_version = 1 +authors = ["Mistral AI"] +repository = "https://github.com/mistralai/mistral-vibe" + +[agent_servers.mistral-vibe-agent] +name = "Mistral Vibe" +icon = "./icons/mistral_vibe.svg" + +[agent_servers.mistral-vibe-agent.targets.darwin-aarch64] +archive = "https://github.com/mistralai/mistral-vibe/releases/download/v1.0.0/vibe-acp-darwin-aarch64-1.0.0.zip" +cmd = "./vibe-acp" + +[agent_servers.mistral-vibe-agent.targets.darwin-x86_64] +archive = "https://github.com/mistralai/mistral-vibe/releases/download/v1.0.0/vibe-acp-darwin-x86_64-1.0.0.zip" +cmd = "./vibe-acp" + +# [agent_servers.mistral-vibe-agent.targets.linux-aarch64] +# archive = "https://github.com/mistralai/mistral-vibe/releases/download/v1.0.0/vibe-acp-linux-aarch64-1.0.0.zip" +# cmd = "./vibe-acp" + +[agent_servers.mistral-vibe-agent.targets.linux-x86_64] +archive = "https://github.com/mistralai/mistral-vibe/releases/download/v1.0.0/vibe-acp-linux-x86_64-1.0.0.zip" +cmd = "./vibe-acp" + +# [agent_servers.mistral-vibe-agent.targets.windows-aarch64] +# archive = "https://github.com/mistralai/mistral-vibe/releases/download/v1.0.0/vibe-acp-windows-aarch64-1.0.0.zip" +# cmd = "./vibe-acp.exe" + +[agent_servers.mistral-vibe-agent.targets.windows-x86_64] +archive = "https://github.com/mistralai/mistral-vibe/releases/download/v1.0.0/vibe-acp-windows-x86_64-1.0.0.zip" +cmd = "./vibe-acp.exe" diff --git 
a/distribution/zed/icons/mistral_vibe.svg b/distribution/zed/icons/mistral_vibe.svg new file mode 100644 index 0000000..67f8b28 --- /dev/null +++ b/distribution/zed/icons/mistral_vibe.svg @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..844f308 --- /dev/null +++ b/flake.lock @@ -0,0 +1,133 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1763283776, + "narHash": "sha256-Y7TDFPK4GlqrKrivOcsHG8xSGqQx3A6c+i7novT85Uk=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "50a96edd8d0db6cc8db57dab6bb6d6ee1f3dc49a", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "pyproject-build-systems": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "pyproject-nix": [ + "pyproject-nix" + ], + "uv2nix": [ + "uv2nix" + ] + }, + "locked": { + "lastModified": 1761781027, + "narHash": "sha256-YDvxPAm2WnxrznRqWwHLjryBGG5Ey1ATEJXrON+TWt8=", + "owner": "pyproject-nix", + "repo": "build-system-pkgs", + "rev": "795a980d25301e5133eca37adae37283ec3c8e66", + "type": "github" + }, + "original": { + "owner": "pyproject-nix", + "repo": "build-system-pkgs", + "type": "github" + } + }, + "pyproject-nix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1763017646, + "narHash": "sha256-Z+R2lveIp6Skn1VPH3taQIuMhABg1IizJd8oVdmdHsQ=", + "owner": "pyproject-nix", + "repo": "pyproject.nix", + "rev": "47bd6f296502842643078d66128f7b5e5370790c", + "type": "github" + }, + "original": { + "owner": "pyproject-nix", + "repo": 
"pyproject.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "pyproject-build-systems": "pyproject-build-systems", + "pyproject-nix": "pyproject-nix", + "uv2nix": "uv2nix" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "uv2nix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "pyproject-nix": [ + "pyproject-nix" + ] + }, + "locked": { + "lastModified": 1763349549, + "narHash": "sha256-GQKYN9j8HOh09RW2I739tyu87ygcsAmpJJ32FspWVJ8=", + "owner": "pyproject-nix", + "repo": "uv2nix", + "rev": "071b718279182c5585f74939c2902c202f93f588", + "type": "github" + }, + "original": { + "owner": "pyproject-nix", + "repo": "uv2nix", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..c070409 --- /dev/null +++ b/flake.nix @@ -0,0 +1,144 @@ +{ + description = "Mistral Vibe!"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + + pyproject-nix = { + url = "github:pyproject-nix/pyproject.nix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + + uv2nix = { + url = "github:pyproject-nix/uv2nix"; + inputs.pyproject-nix.follows = "pyproject-nix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + + pyproject-build-systems = { + url = "github:pyproject-nix/build-system-pkgs"; + inputs.pyproject-nix.follows = "pyproject-nix"; + inputs.uv2nix.follows = "uv2nix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = { + self, + nixpkgs, + flake-utils, + uv2nix, + pyproject-nix, + pyproject-build-systems, + ... 
+ }: + flake-utils.lib.eachDefaultSystem (system: let + inherit (nixpkgs) lib; + + workspace = uv2nix.lib.workspace.loadWorkspace {workspaceRoot = ./.;}; + + overlay = workspace.mkPyprojectOverlay { + sourcePreference = "wheel"; # sdist if you want + }; + + pyprojectOverrides = final: prev: { + # NOTE: If a package complains about a missing dependency (such + # as setuptools), you can add it here. + untokenize = prev.untokenize.overrideAttrs (old: { + buildInputs = (old.buildInputs or []) ++ final.resolveBuildSystem {setuptools = [];}; + }); + }; + + pkgs = import nixpkgs { + inherit system; + }; + + python = pkgs.python312; + + # Construct package set + pythonSet = + # Use base package set from pyproject.nix builders + (pkgs.callPackage pyproject-nix.build.packages { + inherit python; + }).overrideScope + ( + lib.composeManyExtensions [ + pyproject-build-systems.overlays.default + overlay + pyprojectOverrides + ] + ); + in { + packages.default = pythonSet.mkVirtualEnv "mistralai-vibe-env" workspace.deps.default; + + apps = { + default = { + type = "app"; + program = "${self.packages.${system}.default}/bin/vibe"; + }; + }; + + devShells = { + default = let + editableOverlay = workspace.mkEditablePyprojectOverlay { + root = "$REPO_ROOT"; + }; + + editablePythonSet = pythonSet.overrideScope ( + lib.composeManyExtensions [ + editableOverlay + + # Apply fixups for building an editable package of your workspace packages + (final: prev: { + mistralai-vibe = prev.mistralai-vibe.overrideAttrs (old: { + # It's a good idea to filter the sources going into an editable build + # so the editable package doesn't have to be rebuilt on every change. 
+ src = lib.fileset.toSource { + root = old.src; + fileset = lib.fileset.unions [ + (old.src + "/pyproject.toml") + (old.src + "/README.md") + ]; + }; + + nativeBuildInputs = + old.nativeBuildInputs + ++ final.resolveBuildSystem { + editables = []; + }; + }); + }) + ] + ); + + virtualenv = editablePythonSet.mkVirtualEnv "mistralai-vibe-dev-env" workspace.deps.all; + in + pkgs.mkShell { + packages = [ + virtualenv + pkgs.uv + ]; + + env = { + # Don't create venv using uv + UV_NO_SYNC = "1"; + + # Force uv to use Python interpreter from venv + UV_PYTHON = "${virtualenv}/bin/python"; + + # Prevent uv from downloading managed Python's + UV_PYTHON_DOWNLOADS = "never"; + }; + + shellHook = '' + # Undo dependency propagation by nixpkgs. + unset PYTHONPATH + + # Get repository root using git. This is expanded at runtime by the editable `.pth` machinery. + export REPO_ROOT=$(git rev-parse --show-toplevel) + ''; + }; + }; + }); +} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..de598df --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,157 @@ +[project] +name = "mistral-vibe" +version = "1.0.0" +description = "Minimal CLI coding agent by Mistral" +readme = "README.md" +requires-python = ">=3.12" +license = { text = "Apache-2.0" } +authors = [{ name = "Mistral AI" }] +keywords = [ + "ai", + "cli", + "coding-assistant", + "mistral", + "llm", + "developer-tools", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.12", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Utilities", +] +dependencies = [ + "agent-client-protocol==0.6.3", + "aiofiles>=24.1.0", + "httpx>=0.28.1", + "mcp>=1.14.0", + "mistralai==1.9.11", + 
"pexpect>=4.9.0", + "packaging>=24.1", + "pydantic>=2.12.4", + "pydantic-settings>=2.12.0", + "python-dotenv>=1.0.0", + "pytest-xdist>=3.8.0", + "rich>=14.0.0", + "textual>=1.0.0", + "tomli-w>=1.2.0", + "watchfiles>=1.1.1", + "pyperclip>=1.11.0", +] + +[project.urls] +Homepage = "https://github.com/mistralai/mistral-vibe" +Repository = "https://github.com/mistralai/mistral-vibe" +Issues = "https://github.com/mistralai/mistral-vibe/issues" +Documentation = "https://github.com/mistralai/mistral-vibe#readme" + + +[build-system] +requires = ["hatchling", "hatch-vcs", "editables"] +build-backend = "hatchling.build" + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.hatch.build.targets.wheel] +include = ["vibe/"] + + +[project.scripts] +vibe = "vibe.cli.entrypoint:main" +vibe-acp = "vibe.acp.entrypoint:main" + + +[tool.uv] +package = true +required-version = ">=0.8.0" + + +[dependency-groups] +dev = [ + "pre-commit>=4.2.0", + "pyright>=1.1.403", + "pytest>=8.3.5", + "pytest-asyncio>=1.2.0", + "pytest-timeout>=2.4.0", + "pytest-textual-snapshot>=1.1.0", + "respx>=0.22.0", + "ruff>=0.14.5", + "twine>=5.0.0", + "typos>=1.34.0", + "vulture>=2.14", + "pyinstaller>=6.17.0", +] + +[tool.pyright] +pythonVersion = "3.12" +reportMissingTypeStubs = false +reportPrivateImportUsage = false +include = ["vibe/**/*.py", "tests/**/*.py"] +venvPath = "." 
+venv = ".venv" + +[tool.ruff] +include = ["vibe/**/*.py", "tests/**/*.py"] +line-length = 88 +target-version = "py312" +preview = true + +[tool.ruff.format] +skip-magic-trailing-comma = true + +[tool.ruff.lint] +select = [ + "F", + "I", + "D2", + "UP", + "TID", + "ANN", + "PLR", + "B0", + "B905", + "DOC102", + "RUF022", + "RUF010", + "RUF012", + "RUF019", + "RUF100", +] +ignore = ["D203", "D205", "D213", "ANN401", "PLR6301"] + +[tool.ruff.lint.per-file-ignores] +"tests/*" = ["ANN", "PLR"] + +[tool.ruff.lint.flake8-tidy-imports] +ban-relative-imports = "all" + +[tool.ruff.lint.isort] +known-first-party = ["vibe"] +force-sort-within-sections = true +split-on-trailing-comma = true +combine-as-imports = true +force-wrap-aliases = false +order-by-type = true +required-imports = ["from __future__ import annotations"] + +[tool.ruff.lint.pylint] +max-statements = 50 +max-branches = 15 +max-locals = 15 +max-args = 9 +max-returns = 6 +max-nested-blocks = 4 + +[tool.vulture] +ignore_decorators = ["@*"] + +[tool.pytest.ini_options] +addopts = "-vvvv -q -n auto --durations=5 --import-mode=importlib" +timeout = 10 diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..eb2c583 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,20 @@ +# Project Management Scripts + +This directory contains scripts that support project versioning and deployment workflows. + +## Versioning + +### Usage + +```bash +# Bump major version (1.0.0 -> 2.0.0) +uv run scripts/bump_version.py major + +# Bump minor version (1.0.0 -> 1.1.0) +uv run scripts/bump_version.py minor + +# Bump patch/micro version (1.0.0 -> 1.0.1) +uv run scripts/bump_version.py micro +# or +uv run scripts/bump_version.py patch +``` diff --git a/scripts/bump_version.py b/scripts/bump_version.py new file mode 100755 index 0000000..510a7f6 --- /dev/null +++ b/scripts/bump_version.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +"""Version bumping script for semver versioning. 
+ +This script increments the version in pyproject.toml based on the specified bump type: +- major: 1.0.0 -> 2.0.0 +- minor: 1.0.0 -> 1.1.0 +- micro/patch: 1.0.0 -> 1.0.1 +""" + +from __future__ import annotations + +import argparse +from pathlib import Path +import re +import subprocess +import sys +from typing import Literal, get_args + +BumpType = Literal["major", "minor", "micro", "patch"] +BUMP_TYPES = get_args(BumpType) + + +def parse_version(version_str: str) -> tuple[int, int, int]: + match = re.match(r"^(\d+)\.(\d+)\.(\d+)$", version_str.strip()) + if not match: + raise ValueError(f"Invalid version format: {version_str}") + + return int(match.group(1)), int(match.group(2)), int(match.group(3)) + + +def format_version(major: int, minor: int, patch: int) -> str: + return f"{major}.{minor}.{patch}" + + +def bump_version(version: str, bump_type: BumpType) -> str: + major, minor, patch = parse_version(version) + + match bump_type: + case "major": + return format_version(major + 1, 0, 0) + case "minor": + return format_version(major, minor + 1, 0) + case "micro" | "patch": + return format_version(major, minor, patch + 1) + + +def update_hard_values_files(filepath: str, patterns: list[tuple[str, str]]) -> None: + path = Path(filepath) + + if not path.exists(): + raise FileNotFoundError(f"{filepath} not found in current directory") + + # Replace patterns + for pattern, replacement in patterns: + content = path.read_text() + updated_content = re.sub(pattern, replacement, content, flags=re.MULTILINE) + + if updated_content == content: + raise ValueError(f"pattern {pattern} not found in {filepath}") + + path.write_text(updated_content) + + print(f"Updated version in {filepath}") + + +def get_current_version() -> str: + pyproject_path = Path("pyproject.toml") + + if not pyproject_path.exists(): + raise FileNotFoundError("pyproject.toml not found in current directory") + + content = pyproject_path.read_text() + + # Find version line + version_match = 
re.search(r'^version = "([^"]+)"$', content, re.MULTILINE) + if not version_match: + raise ValueError("Version not found in pyproject.toml") + + return version_match.group(1) + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Bump semver version in pyproject.toml", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + uv run scripts/bump_version.py major # 1.0.0 -> 2.0.0 + uv run scripts/bump_version.py minor # 1.0.0 -> 1.1.0 + uv run scripts/bump_version.py micro # 1.0.0 -> 1.0.1 + uv run scripts/bump_version.py patch # 1.0.0 -> 1.0.1 + """, + ) + + parser.add_argument( + "bump_type", choices=BUMP_TYPES, help="Type of version bump to perform" + ) + + args = parser.parse_args() + + try: + # Get current version + current_version = get_current_version() + print(f"Current version: {current_version}") + + # Calculate new version + new_version = bump_version(current_version, args.bump_type) + print(f"New version: {new_version}") + + # Update pyproject.toml + update_hard_values_files( + "pyproject.toml", + [(f'version = "{current_version}"', f'version = "{new_version}"')], + ) + # Update extension.toml + update_hard_values_files( + "distribution/zed/extension.toml", + [ + (f'version = "{current_version}"', f'version = "{new_version}"'), + ( + f"releases/download/v{current_version}", + f"releases/download/v{new_version}", + ), + (f"-{current_version}.zip", f"-{new_version}.zip"), + ], + ) + + subprocess.run(["uv", "lock"], check=True) + + print(f"Successfully bumped version from {current_version} to {new_version}") + + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/scripts/install.sh b/scripts/install.sh new file mode 100755 index 0000000..ae979cd --- /dev/null +++ b/scripts/install.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash + +# Mistral Vibe Installation Script +# This script installs uv if not present and then installs mistral-vibe 
using uv + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +function error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +function info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +function success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +function warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +function check_platform() { + local platform=$(uname -s) + + if [[ "$platform" == "Linux" ]]; then + info "Detected Linux platform" + PLATFORM="linux" + elif [[ "$platform" == "Darwin" ]]; then + info "Detected macOS platform" + PLATFORM="macos" + else + error "Unsupported platform: $platform" + error "This installation script currently only supports Linux and macOS" + exit 1 + fi +} + +function check_uv_installed() { + if command -v uv &> /dev/null; then + info "uv is already installed: $(uv --version)" + UV_INSTALLED=true + else + info "uv is not installed" + UV_INSTALLED=false + fi +} + +function install_uv() { + info "Installing uv using the official Astral installer..." + + if ! command -v curl &> /dev/null; then + error "curl is required to install uv. Please install curl first." + exit 1 + fi + + if curl -LsSf https://astral.sh/uv/install.sh | sh; then + success "uv installed successfully" + + export PATH="$HOME/.local/bin:$PATH" + + if ! command -v uv &> /dev/null; then + warning "uv was installed but not found in PATH for this session" + warning "You may need to restart your terminal or run:" + warning " export PATH=\"\$HOME/.cargo/bin:\$HOME/.local/bin:\$PATH\"" + fi + else + error "Failed to install uv" + exit 1 + fi +} + +function install_vibe() { + info "Installing mistral-vibe from GitHub repository using uv..." + uv tool install mistral-vibe + + success "Mistral Vibe installed successfully! 
(commands: vibe, vibe-acp)" +} + +function main() { + echo + echo "██████████████████░░" + echo "██████████████████░░" + echo "████ ██████ ████░░" + echo "████ ██ ████░░" + echo "████ ████░░" + echo "████ ██ ██ ████░░" + echo "██ ██ ██░░" + echo "██████████████████░░" + echo "██████████████████░░" + echo + echo "Starting Mistral Vibe installation..." + echo + + check_platform + + check_uv_installed + + if [[ "$UV_INSTALLED" == "false" ]]; then + install_uv + fi + + install_vibe + + if command -v vibe &> /dev/null; then + success "Installation completed successfully!" + echo + echo "You can now run vibe with:" + echo " vibe" + echo + echo "Or for ACP mode:" + echo " vibe-acp" + else + error "Installation completed but 'vibe' command not found" + error "Please check your installation and PATH settings" + exit 1 + fi +} + +main diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..9d1efe2 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from pathlib import Path + +TESTS_ROOT = Path(__file__).parent diff --git a/tests/acp/test_acp.py b/tests/acp/test_acp.py new file mode 100644 index 0000000..7eda139 --- /dev/null +++ b/tests/acp/test_acp.py @@ -0,0 +1,925 @@ +from __future__ import annotations + +import asyncio +from collections.abc import AsyncGenerator +import json +import os +from typing import Any + +from acp import ( + InitializeRequest, + NewSessionRequest, + PromptRequest, + ReadTextFileRequest, + ReadTextFileResponse, + RequestPermissionRequest, + RequestPermissionResponse, + WriteTextFileRequest, +) +from acp.schema import ( + AgentCapabilities, + AllowedOutcome, + DeniedOutcome, + Implementation, + InitializeResponse, + McpCapabilities, + NewSessionResponse, + PromptCapabilities, + PromptResponse, + SessionNotification, + TextContentBlock, +) +from pydantic import BaseModel +import pytest + +from tests import TESTS_ROOT +from tests.mock.utils import get_mocking_env, mock_llm_chunk 
+from vibe.acp.utils import ToolOption +from vibe.core.types import FunctionCall, ToolCall + +RESPONSE_TIMEOUT = 2.0 +MOCK_ENTRYPOINT_PATH = "tests/mock/mock_entrypoint.py" +PLAYGROUND_DIR = TESTS_ROOT / "playground" + + +class JsonRpcRequest(BaseModel): + jsonrpc: str = "2.0" + id: int | str + method: str + params: Any | None = None + + +class JsonRpcError(BaseModel): + code: int + message: str + data: Any | None = None + + +class JsonRpcResponse(BaseModel): + jsonrpc: str = "2.0" + id: int | str | None = None + result: Any | None = None + error: JsonRpcError | None = None + + +class JsonRpcNotification(BaseModel): + jsonrpc: str = "2.0" + method: str + params: Any | None = None + + +type JsonRpcMessage = JsonRpcResponse | JsonRpcNotification | JsonRpcRequest + + +class InitializeJsonRpcRequest(JsonRpcRequest): + method: str = "initialize" + params: InitializeRequest | None = None + + +class InitializeJsonRpcResponse(JsonRpcResponse): + result: InitializeResponse | None = None + + +class NewSessionJsonRpcRequest(JsonRpcRequest): + method: str = "session/new" + params: NewSessionRequest | None = None + + +class NewSessionJsonRpcResponse(JsonRpcResponse): + result: NewSessionResponse | None = None + + +class PromptJsonRpcRequest(JsonRpcRequest): + method: str = "session/prompt" + params: PromptRequest | None = None + + +class PromptJsonRpcResponse(JsonRpcResponse): + result: PromptResponse | None = None + + +class UpdateJsonRpcNotification(JsonRpcNotification): + method: str = "session/update" + params: SessionNotification | None = None + + +class RequestPermissionJsonRpcRequest(JsonRpcRequest): + method: str = "session/request_permission" + params: RequestPermissionRequest | None = None + + +class RequestPermissionJsonRpcResponse(JsonRpcResponse): + result: RequestPermissionResponse | None = None + + +class ReadTextFileJsonRpcRequest(JsonRpcRequest): + method: str = "fs/read_text_file" + params: ReadTextFileRequest | None = None + + +class 
ReadTextFileJsonRpcResponse(JsonRpcResponse): + result: ReadTextFileResponse | None = None + + +class WriteTextFileJsonRpcRequest(JsonRpcRequest): + method: str = "fs/write_text_file" + params: WriteTextFileRequest | None = None + + +class WriteTextFileJsonRpcResponse(JsonRpcResponse): + result: None = None + + +async def get_acp_agent_process( + mock: bool = True, mock_env: dict[str, str] | None = None +) -> AsyncGenerator[asyncio.subprocess.Process]: + current_env = os.environ.copy() + cmd = ["uv", "run", MOCK_ENTRYPOINT_PATH if mock else "vibe-acp"] + + process = await asyncio.create_subprocess_exec( + *cmd, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=TESTS_ROOT.parent, + env={ + **current_env, + **(mock_env or {}), + **({"MISTRAL_API_KEY": "mock"} if mock else {}), + }, + ) + + try: + yield process + finally: + # Cleanup + if process.returncode is None: + process.terminate() + try: + await asyncio.wait_for(process.wait(), timeout=0.5) + except TimeoutError: + process.kill() + await process.wait() + + +async def send_json_rpc( + process: asyncio.subprocess.Process, message: JsonRpcMessage +) -> None: + if process.stdin is None: + raise RuntimeError("Process stdin not available") + + request = message.model_dump_json() + request_json = request + "\n" + process.stdin.write(request_json.encode()) + await process.stdin.drain() + + +async def read_response( + process: asyncio.subprocess.Process, timeout: float = RESPONSE_TIMEOUT +) -> str | None: + if process.stdout is None: + raise RuntimeError("Process stdout not available") + + try: + # Keep reading lines until we find a valid JSON line + while True: + line = await asyncio.wait_for(process.stdout.readline(), timeout=timeout) + + if not line: + return None + + line_str = line.decode().strip() + if not line_str: + continue + + try: + json.loads(line_str) + return line_str + except json.JSONDecodeError: + # Not JSON, skip it (it's a log message) + 
continue + except TimeoutError: + return None + + +async def read_response_for_id( + process: asyncio.subprocess.Process, + expected_id: int | str, + timeout: float = RESPONSE_TIMEOUT, +) -> str | None: + loop = asyncio.get_running_loop() + end_time = loop.time() + timeout + + while (remaining := end_time - loop.time()) > 0: + response = await read_response(process, timeout=remaining) + if response is None: + return None + + response_json = json.loads(response) + if response_json.get("id") == expected_id: + return response + print( + f"Skipping response with id={response_json.get('id')}, expecting {expected_id}" + ) + + return None + + +async def read_multiple_responses( + process: asyncio.subprocess.Process, + max_count: int = 10, + timeout_per_response: float = RESPONSE_TIMEOUT, +) -> list[str]: + responses = [] + for _ in range(max_count): + response = await read_response(process, timeout=timeout_per_response) + if response: + responses.append(response) + else: + break + return responses + + +def parse_conversation(message_texts: list[str]) -> list[JsonRpcMessage]: + parsed_messages: list[JsonRpcMessage] = [] + for message_text in message_texts: + message_json = json.loads(message_text) + cls = None + has_method = message_json.get("method", None) is not None + has_id = message_json.get("id", None) is not None + has_result = message_json.get("result", None) is not None + + is_request = has_method and has_id + is_notification = has_method and not has_id + is_response = has_result + + if is_request: + match message_json.get("method"): + case "session/prompt": + cls = PromptJsonRpcRequest + case "session/request_permission": + cls = RequestPermissionJsonRpcRequest + case "fs/read_text_file": + cls = ReadTextFileJsonRpcRequest + case "fs/write_text_file": + cls = WriteTextFileJsonRpcRequest + elif is_notification: + match message_json.get("method"): + case "session/update": + cls = UpdateJsonRpcNotification + elif is_response: + # For responses, since we don't know 
the method, we need to find + # the matching request. + matching_request = next( + ( + m + for m in parsed_messages + if isinstance(m, JsonRpcRequest) and m.id == message_json.get("id") + ), + None, + ) + if matching_request is None: + # No matching request found in the conversation, it most probably was + # not included in the conversation. We use a generic response class. + cls = JsonRpcResponse + else: + match matching_request.method: + case "session/prompt": + cls = PromptJsonRpcResponse + case "session/request_permission": + cls = RequestPermissionJsonRpcResponse + case "fs/read_text_file": + cls = ReadTextFileJsonRpcResponse + case "fs/write_text_file": + cls = WriteTextFileJsonRpcResponse + if cls is None: + raise ValueError(f"No valid message class found for {message_json}") + parsed_messages.append(cls.model_validate(message_json)) + return parsed_messages + + +async def initialize_session(acp_agent_process: asyncio.subprocess.Process) -> str: + await send_json_rpc( + acp_agent_process, + InitializeJsonRpcRequest(id=1, params=InitializeRequest(protocolVersion=1)), + ) + initialize_response = await read_response_for_id( + acp_agent_process, expected_id=1, timeout=5.0 + ) + assert initialize_response is not None + + await send_json_rpc( + acp_agent_process, + NewSessionJsonRpcRequest( + id=2, params=NewSessionRequest(cwd=str(PLAYGROUND_DIR), mcpServers=[]) + ), + ) + session_response = await read_response_for_id(acp_agent_process, expected_id=2) + assert session_response is not None + session_response_json = json.loads(session_response) + session_response_obj = NewSessionJsonRpcResponse.model_validate( + session_response_json + ) + assert session_response_obj.result is not None, "No result in response" + return session_response_obj.result.sessionId + + +class TestInitialization: + @pytest.mark.asyncio + async def test_initialize_request_response(self) -> None: + mock_env = get_mocking_env() + async for process in get_acp_agent_process(mock_env=mock_env): + 
await send_json_rpc( + process, + InitializeJsonRpcRequest( + id=1, params=InitializeRequest(protocolVersion=1) + ), + ) + + text_response = await read_response(process, timeout=10.0) + assert text_response is not None, "No response to initialize" + response_json = json.loads(text_response) + response = InitializeJsonRpcResponse.model_validate(response_json) + assert response.error is None, f"JSON-RPC error: {response.error}" + assert response.result is not None, "No result in response" + assert response.result.protocolVersion == 1 + assert response.result.agentCapabilities == AgentCapabilities( + loadSession=False, + promptCapabilities=PromptCapabilities( + audio=False, embeddedContext=True, image=False + ), + mcpCapabilities=McpCapabilities(http=False, sse=False), + ) + assert response.result.agentInfo == Implementation( + name="@mistralai/mistral-vibe", title="Mistral Vibe", version="0.1.0" + ) + vibe_setup_method = next( + ( + method + for method in response.result.authMethods or [] + if method.id == "vibe-setup" + ), + None, + ) + assert vibe_setup_method is not None, "vibe-setup auth not found" + assert vibe_setup_method.field_meta is not None + assert "terminal-auth" in vibe_setup_method.field_meta.keys() + + +class TestSessionManagement: + @pytest.mark.asyncio + async def test_multiple_sessions_unique_ids(self) -> None: + mock_env = get_mocking_env(mock_chunks=[mock_llm_chunk() for _ in range(3)]) + async for process in get_acp_agent_process(mock_env=mock_env): + await send_json_rpc( + process, + InitializeJsonRpcRequest( + id=1, params=InitializeRequest(protocolVersion=1) + ), + ) + await read_response_for_id(process, expected_id=1, timeout=5.0) + + session_ids = [] + for i in range(3): + await send_json_rpc( + process, + NewSessionJsonRpcRequest( + id=i + 2, + params=NewSessionRequest( + cwd=str(PLAYGROUND_DIR), mcpServers=[] + ), + ), + ) + text_response = await read_response_for_id( + process, expected_id=i + 2, timeout=RESPONSE_TIMEOUT + ) + assert 
text_response is not None + response_json = json.loads(text_response) + response = NewSessionJsonRpcResponse.model_validate(response_json) + assert response.error is None, f"JSON-RPC error: {response.error}" + assert response.result is not None, "No result in response" + session_ids.append(response.result.sessionId) + + assert len(set(session_ids)) == 3 + + +class TestSessionUpdates: + @pytest.mark.asyncio + async def test_agent_message_chunk_structure(self) -> None: + mock_env = get_mocking_env([mock_llm_chunk(content="Hi") for _ in range(2)]) + async for process in get_acp_agent_process(mock_env=mock_env): + # Check stderr for error details if process failed + if process.returncode is not None and process.stderr: + stderr_data = await process.stderr.read() + if stderr_data: + # Log stderr for debugging test failures + pass # Could add proper logging here if needed + + session_id = await initialize_session(process) + + await send_json_rpc( + process, + PromptJsonRpcRequest( + id=3, + params=PromptRequest( + sessionId=session_id, + prompt=[TextContentBlock(type="text", text="Just say hi")], + ), + ), + ) + text_response = await read_response(process) + assert text_response is not None + response = UpdateJsonRpcNotification.model_validate( + json.loads(text_response) + ) + + assert response.params is not None + assert response.params.update.sessionUpdate == "agent_message_chunk" + assert response.params.update.content is not None + assert response.params.update.content.type == "text" + assert response.params.update.content.text is not None + assert response.params.update.content.text == "Hi" + + @pytest.mark.asyncio + async def test_tool_call_update_structure(self) -> None: + mock_env = get_mocking_env([ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="grep", arguments='{"pattern": "auth"}' + ), + type="function", + index=0, + ) + ], + name="bash", + finish_reason="tool_calls", + ), + mock_llm_chunk( + 
content="The files containing the pattern 'auth' are ...", + finish_reason="stop", + ), + ]) + async for process in get_acp_agent_process(mock_env=mock_env): + session_id = await initialize_session(process) + + await send_json_rpc( + process, + PromptJsonRpcRequest( + id=3, + params=PromptRequest( + sessionId=session_id, + prompt=[ + TextContentBlock( + type="text", + text="Show me files that are related to auth", + ) + ], + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=10) + assert len(text_responses) > 0 + responses = [ + UpdateJsonRpcNotification.model_validate(json.loads(r)) + for r in text_responses + ] + + tool_call = next( + ( + r + for r in responses + if isinstance(r, UpdateJsonRpcNotification) + and r.params is not None + and r.params.update.sessionUpdate == "tool_call" + ), + None, + ) + assert tool_call is not None + assert tool_call.params is not None + assert tool_call.params.update is not None + + assert tool_call.params.update.sessionUpdate == "tool_call" + assert tool_call.params.update.kind == "search" + assert tool_call.params.update.title == "grep: 'auth'" + assert ( + tool_call.params.update.rawInput + == '{"pattern":"auth","path":".","max_matches":null,"use_default_ignore":true}' + ) + + +async def start_session_with_request_permission( + process: asyncio.subprocess.Process, prompt: str +) -> RequestPermissionJsonRpcRequest: + session_id = await initialize_session(process) + await send_json_rpc( + process, + PromptJsonRpcRequest( + id=3, + params=PromptRequest( + sessionId=session_id, + prompt=[TextContentBlock(type="text", text=prompt)], + ), + ), + ) + text_responses = await read_multiple_responses( + process, max_count=15, timeout_per_response=2.0 + ) + + responses = parse_conversation(text_responses) + last_response = responses[-1] + + assert isinstance(last_response, RequestPermissionJsonRpcRequest) + assert last_response.params is not None + assert len(last_response.params.options) == 3 + return 
last_response + + +@pytest.mark.skip( + reason="Disabled until we have a way to properly mock the fs and acp interactions" +) +class TestToolCallStructure: + @pytest.mark.asyncio + async def test_tool_call_request_permission_structure(self) -> None: + custom_results = [ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="write_file", + arguments='{"path":"test.txt","content":"hello, world!"' + ',"overwrite":true}', + ), + type="function", + index=0, + ) + ], + name="write_file", + finish_reason="stop", + ), + ] + mock_env = get_mocking_env(custom_results) + async for process in get_acp_agent_process(mock_env=mock_env): + session_id = await initialize_session(process) + await send_json_rpc( + process, + PromptJsonRpcRequest( + id=3, + params=PromptRequest( + sessionId=session_id, + prompt=[ + TextContentBlock( + type="text", + text="Create a new file named test.txt " + "with content 'hello, world!'", + ) + ], + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=3) + responses = parse_conversation(text_responses) + + # Look for tool call request permission updates + permission_requests = [ + r for r in responses if isinstance(r, RequestPermissionJsonRpcRequest) + ] + + assert len(permission_requests) > 0, ( + "No tool call permission requests found" + ) + + first_request = permission_requests[0] + assert first_request.params is not None + assert first_request.params.toolCall is not None + assert first_request.params.toolCall.toolCallId is not None + + @pytest.mark.asyncio + async def test_tool_call_update_approved_structure(self) -> None: + custom_results = [ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="write_file", + arguments='{"path":"test.txt","content":"hello, world!"' + ',"overwrite":true}', + ), + type="function", + index=0, + ) + ], + name="write_file", + finish_reason="tool_calls", + ), + mock_llm_chunk( + 
content="The file test.txt has been created", finish_reason="stop" + ), + ] + mock_env = get_mocking_env(custom_results) + async for process in get_acp_agent_process(mock_env=mock_env): + permission_request = await start_session_with_request_permission( + process, "Create a file named test.txt" + ) + assert permission_request.params is not None + selected_option_id = ToolOption.ALLOW_ONCE + await send_json_rpc( + process, + RequestPermissionJsonRpcResponse( + id=permission_request.id, + result=RequestPermissionResponse( + outcome=AllowedOutcome( + outcome="selected", optionId=selected_option_id + ) + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=7) + responses = parse_conversation(text_responses) + + approved_tool_call = next( + ( + r + for r in responses + if isinstance(r, UpdateJsonRpcNotification) + and r.method == "session/update" + and r.params is not None + and r.params.update is not None + and r.params.update.sessionUpdate == "tool_call_update" + and r.params.update.toolCallId + == (permission_request.params.toolCall.toolCallId) + and r.params.update.status == "completed" + ), + None, + ) + assert approved_tool_call is not None + + @pytest.mark.asyncio + async def test_tool_call_update_rejected_structure(self) -> None: + custom_results = [ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="write_file", + arguments='{"path":"test.txt","content":"hello, world!"' + ',"overwrite":false}', + ), + type="function", + index=0, + ) + ], + name="write_file", + finish_reason="tool_calls", + ), + mock_llm_chunk( + content="The file test.txt has not been created, " + "because you rejected the permission request", + finish_reason="stop", + ), + ] + mock_env = get_mocking_env(custom_results) + async for process in get_acp_agent_process(mock_env=mock_env): + permission_request = await start_session_with_request_permission( + process, "Create a file named test.txt" + ) + assert 
permission_request.params is not None + selected_option_id = ToolOption.REJECT_ONCE + + await send_json_rpc( + process, + RequestPermissionJsonRpcResponse( + id=permission_request.id, + result=RequestPermissionResponse( + outcome=AllowedOutcome( + outcome="selected", optionId=selected_option_id + ) + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=5) + responses = parse_conversation(text_responses) + + rejected_tool_call = next( + ( + r + for r in responses + if isinstance(r, UpdateJsonRpcNotification) + and r.method == "session/update" + and r.params is not None + and r.params.update.sessionUpdate == "tool_call_update" + and r.params.update.toolCallId + == (permission_request.params.toolCall.toolCallId) + and r.params.update.status == "failed" + ), + None, + ) + assert rejected_tool_call is not None + + @pytest.mark.skip(reason="Long running tool call updates are not implemented yet") + @pytest.mark.asyncio + async def test_tool_call_in_progress_update_structure(self) -> None: + custom_results = [ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="bash", + arguments='{"command":"sleep 3","timeout":null}', + ), + type="function", + ) + ], + name="bash", + finish_reason="tool_calls", + ), + mock_llm_chunk( + content="The command sleep 3 has been run", finish_reason="stop" + ), + ] + mock_env = get_mocking_env(custom_results) + async for process in get_acp_agent_process(mock_env=mock_env): + session_id = await initialize_session(process) + await send_json_rpc( + process, + PromptJsonRpcRequest( + id=3, + params=PromptRequest( + sessionId=session_id, + prompt=[ + TextContentBlock( + type="text", text="Run sleep 3 in the current directory" + ) + ], + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=4) + responses = parse_conversation(text_responses) + + # Look for tool call in progress updates + in_progress_calls = [ + r + for r in responses + if 
isinstance(r, UpdateJsonRpcNotification) + and r.params is not None + and r.params.update.sessionUpdate == "tool_call_update" + and r.params.update.status == "in_progress" + ] + + assert len(in_progress_calls) > 0, ( + "No tool call in progress updates found for a long running command" + ) + + @pytest.mark.asyncio + async def test_tool_call_result_update_failure_structure(self) -> None: + custom_results = [ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="write_file", + arguments='{"path":"/test.txt","content":"hello, world!"' + ',"overwrite":true}', + ), + type="function", + index=0, + ) + ], + name="write_file", + finish_reason="tool_calls", + ), + mock_llm_chunk( + content="The file /test.txt has not been created " + "because it's outside the project directory", + finish_reason="stop", + ), + ] + mock_env = get_mocking_env(custom_results) + async for process in get_acp_agent_process(mock_env=mock_env): + permission_request = await start_session_with_request_permission( + process, "Create a file named /test.txt" + ) + assert permission_request.params is not None + selected_option_id = ToolOption.ALLOW_ONCE + await send_json_rpc( + process, + RequestPermissionJsonRpcResponse( + id=permission_request.id, + result=RequestPermissionResponse( + outcome=AllowedOutcome( + outcome="selected", optionId=selected_option_id + ) + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=7) + responses = parse_conversation(text_responses) + + # Look for tool call result failure updates + failure_result = next( + ( + r + for r in responses + if isinstance(r, UpdateJsonRpcNotification) + and r.params is not None + and r.params.update.sessionUpdate == "tool_call_update" + and r.params.update.status == "failed" + and r.params.update.rawOutput is not None + and r.params.update.toolCallId is not None + ), + None, + ) + + assert failure_result is not None + + +class TestCancellationStructure: + 
@pytest.mark.skip( + reason="Proper cancellation is not implemented yet, we still need to return " + "the right end_turn and be able to cancel at any point in time " + "(and not only at tool call time)" + ) + @pytest.mark.asyncio + async def test_tool_call_update_cancelled_structure(self) -> None: + custom_results = [ + mock_llm_chunk(content="Hey"), + mock_llm_chunk( + tool_calls=[ + ToolCall( + function=FunctionCall( + name="write_file", + arguments='{"path":"test.txt","content":"hello, world!"' + ',"overwrite":false}', + ), + type="function", + index=0, + ) + ], + name="write_file", + finish_reason="tool_calls", + ), + mock_llm_chunk( + content="The file test.txt has not been created, " + "because you cancelled the permission request", + finish_reason="stop", + ), + ] + mock_env = get_mocking_env(custom_results) + async for process in get_acp_agent_process(mock_env=mock_env): + permission_request = await start_session_with_request_permission( + process, "Create a file named test.txt" + ) + assert permission_request.params is not None + + await send_json_rpc( + process, + RequestPermissionJsonRpcResponse( + id=permission_request.id, + result=RequestPermissionResponse( + outcome=DeniedOutcome(outcome="cancelled") + ), + ), + ) + text_responses = await read_multiple_responses(process, max_count=5) + responses = parse_conversation(text_responses) + + assert len(responses) == 2, ( + "There should be only 2 responses: " + "the tool call update and the prompt end turn" + ) + + cancelled_tool_call = next( + ( + r + for r in responses + if isinstance(r, UpdateJsonRpcNotification) + and r.method == "session/update" + and r.params is not None + and r.params.update.sessionUpdate == "tool_call_update" + and r.params.update.toolCallId + == (permission_request.params.toolCall.toolCallId) + and r.params.update.status == "failed" + ), + None, + ) + assert cancelled_tool_call is not None + + cancelled_prompt_response = next( + ( + r + for r in responses + if isinstance(r, 
PromptJsonRpcResponse) + and r.result is not None + and r.result.stopReason == "cancelled" + ), + None, + ) + assert cancelled_prompt_response is not None diff --git a/tests/acp/test_bash.py b/tests/acp/test_bash.py new file mode 100644 index 0000000..798c685 --- /dev/null +++ b/tests/acp/test_bash.py @@ -0,0 +1,536 @@ +from __future__ import annotations + +import asyncio + +from acp.schema import TerminalOutputResponse, WaitForTerminalExitResponse +import pytest + +from vibe.acp.tools.builtins.bash import AcpBashState, Bash +from vibe.core.tools.base import ToolError +from vibe.core.tools.builtins.bash import BashArgs, BashResult, BashToolConfig + + +class MockTerminalHandle: + def __init__( + self, + terminal_id: str = "test_terminal_123", + exit_code: int | None = 0, + output: str = "test output", + wait_delay: float = 0.01, + ) -> None: + self.id = terminal_id + self._exit_code = exit_code + self._output = output + self._wait_delay = wait_delay + self._killed = False + + async def wait_for_exit(self) -> WaitForTerminalExitResponse: + await asyncio.sleep(self._wait_delay) + return WaitForTerminalExitResponse(exitCode=self._exit_code) + + async def current_output(self) -> TerminalOutputResponse: + return TerminalOutputResponse(output=self._output, truncated=False) + + async def kill(self) -> None: + self._killed = True + + async def release(self) -> None: + pass + + +class MockConnection: + def __init__(self, terminal_handle: MockTerminalHandle | None = None) -> None: + self._terminal_handle = terminal_handle or MockTerminalHandle() + self._create_terminal_called = False + self._session_update_called = False + self._create_terminal_error: Exception | None = None + self._last_create_request = None + + async def createTerminal(self, request) -> MockTerminalHandle: + self._create_terminal_called = True + self._last_create_request = request + if self._create_terminal_error: + raise self._create_terminal_error + return self._terminal_handle + + async def 
sessionUpdate(self, notification) -> None: + self._session_update_called = True + + +@pytest.fixture +def mock_connection() -> MockConnection: + return MockConnection() + + +@pytest.fixture +def acp_bash_tool(mock_connection: MockConnection) -> Bash: + config = BashToolConfig() + # Use model_construct to bypass Pydantic validation for testing + state = AcpBashState.model_construct( + connection=mock_connection, # type: ignore[arg-type] + session_id="test_session_123", + tool_call_id="test_tool_call_456", + ) + return Bash(config=config, state=state) + + +class TestAcpBashBasic: + def test_get_name(self) -> None: + assert Bash.get_name() == "bash" + + def test_get_summary_simple_command(self) -> None: + args = BashArgs(command="ls") + display = Bash.get_summary(args) + assert display == "ls" + + def test_get_summary_with_timeout(self) -> None: + args = BashArgs(command="ls", timeout=10) + display = Bash.get_summary(args) + assert display == "ls (timeout 10s)" + + def test_parse_command_simple(self) -> None: + tool = Bash(config=BashToolConfig(), state=AcpBashState()) + env, command, args = tool._parse_command("ls") + assert env == [] + assert command == "ls" + assert args == [] + + def test_parse_command_with_args(self) -> None: + tool = Bash(config=BashToolConfig(), state=AcpBashState()) + env, command, args = tool._parse_command("ls -la src") + assert env == [] + assert command == "ls" + assert args == ["-la", "src"] + + def test_parse_command_with_env(self) -> None: + tool = Bash(config=BashToolConfig(), state=AcpBashState()) + env, command, args = tool._parse_command("NODE_ENV=test DEBUG=1 npm test") + assert len(env) == 2 + assert env[0].name == "NODE_ENV" + assert env[0].value == "test" + assert env[1].name == "DEBUG" + assert env[1].value == "1" + assert command == "npm" + assert args == ["test"] + + def test_parse_command_with_env_value_contains_equals(self) -> None: + tool = Bash(config=BashToolConfig(), state=AcpBashState()) + env, command, args = 
class TestAcpBashExecution:
    """Execution paths of the ACP Bash tool against a MockConnection."""

    @pytest.mark.asyncio
    async def test_run_success(
        self, acp_bash_tool: Bash, mock_connection: MockConnection
    ) -> None:
        """Happy path: command runs, output and exit code are surfaced."""
        from pathlib import Path

        args = BashArgs(command="echo hello")
        result = await acp_bash_tool.run(args)

        assert isinstance(result, BashResult)
        assert result.stdout == "test output"
        assert result.stderr == ""
        assert result.returncode == 0
        assert mock_connection._create_terminal_called

        # Verify CreateTerminalRequest was created correctly
        request = mock_connection._last_create_request
        assert request is not None
        assert request.sessionId == "test_session_123"
        assert request.command == "echo"
        assert request.args == ["hello"]
        assert request.cwd == str(Path.cwd())  # effective_workdir defaults to cwd

    @pytest.mark.asyncio
    async def test_run_creates_terminal_with_env_vars(
        self, mock_connection: MockConnection
    ) -> None:
        """Leading VAR=value assignments are split out into the request env."""
        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="NODE_ENV=test npm run build")
        await tool.run(args)

        request = mock_connection._last_create_request
        assert request is not None
        assert len(request.env) == 1
        assert request.env[0].name == "NODE_ENV"
        assert request.env[0].value == "test"
        assert request.command == "npm"
        assert request.args == ["run", "build"]

    @pytest.mark.asyncio
    async def test_run_with_nonzero_exit_code(
        self, mock_connection: MockConnection
    ) -> None:
        """A non-zero exit code raises ToolError with the captured output."""
        custom_handle = MockTerminalHandle(
            terminal_id="custom_terminal", exit_code=1, output="error: command failed"
        )
        mock_connection._terminal_handle = custom_handle

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test_command")
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == "Command failed: 'test_command'\nReturn code: 1\nStdout: error: command failed"
        )

    @pytest.mark.asyncio
    async def test_run_create_terminal_failure(
        self, mock_connection: MockConnection
    ) -> None:
        """Terminal creation errors are wrapped in a descriptive ToolError."""
        mock_connection._create_terminal_error = RuntimeError("Connection failed")

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test")
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == "Failed to create terminal: RuntimeError('Connection failed')"
        )

    @pytest.mark.asyncio
    async def test_run_without_connection(self) -> None:
        """Missing connection in tool state is rejected up front."""
        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=None, session_id="test_session", tool_call_id="test_call"
            ),
        )

        args = BashArgs(command="test")
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == "Connection not available in tool state. This tool can only be used within an ACP session."
        )

    @pytest.mark.asyncio
    async def test_run_without_session_id(self) -> None:
        """Missing session id in tool state is rejected up front."""
        mock_connection = MockConnection()
        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id=None,
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test")
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == "Session ID not available in tool state. This tool can only be used within an ACP session."
        )

    @pytest.mark.asyncio
    async def test_run_with_none_exit_code(
        self, mock_connection: MockConnection
    ) -> None:
        """A None exit code from the terminal is treated as success (0)."""
        custom_handle = MockTerminalHandle(
            terminal_id="none_exit_terminal", exit_code=None, output="output"
        )
        mock_connection._terminal_handle = custom_handle

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test_command")
        result = await tool.run(args)

        assert result.returncode == 0
        assert result.stdout == "output"


class TestAcpBashTimeout:
    """Timeout handling: the per-call timeout wins and the terminal is killed."""

    @pytest.mark.asyncio
    async def test_run_with_timeout_raises_error_and_kills(
        self, mock_connection: MockConnection
    ) -> None:
        custom_handle = MockTerminalHandle(
            terminal_id="timeout_terminal",
            output="partial output",
            wait_delay=20,  # Longer than the 1 second timeout
        )
        mock_connection._terminal_handle = custom_handle

        # Use a config with different default timeout to verify args timeout overrides it
        tool = Bash(
            config=BashToolConfig(default_timeout=30),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="slow_command", timeout=1)
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert str(exc_info.value) == "Command timed out after 1s: 'slow_command'"
        assert custom_handle._killed

    @pytest.mark.asyncio
    async def test_run_timeout_handles_kill_failure(
        self, mock_connection: MockConnection
    ) -> None:
        """The timeout error is still raised even when kill() itself fails."""
        custom_handle = MockTerminalHandle(
            terminal_id="kill_failure_terminal",
            wait_delay=20,  # Longer than the 1 second timeout
        )
        mock_connection._terminal_handle = custom_handle

        async def failing_kill() -> None:
            raise RuntimeError("Kill failed")

        custom_handle.kill = failing_kill

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="slow_command", timeout=1)
        # Should still raise timeout error even if kill fails
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert str(exc_info.value) == "Command timed out after 1s: 'slow_command'"


class TestAcpBashEmbedding:
    """Terminal embedding via sessionUpdate notifications."""

    @pytest.mark.asyncio
    async def test_run_with_embedding(self, mock_connection: MockConnection) -> None:
        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test")
        await tool.run(args)

        assert mock_connection._session_update_called

    @pytest.mark.asyncio
    async def test_run_embedding_without_tool_call_id(
        self, mock_connection: MockConnection
    ) -> None:
        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id=None,
            ),
        )

        args = BashArgs(command="test")
        await tool.run(args)

        # Embedding should be skipped when tool_call_id is None
        assert not mock_connection._session_update_called

    @pytest.mark.asyncio
    async def test_run_embedding_handles_exception(
        self, mock_connection: MockConnection
    ) -> None:
        # Make sessionUpdate raise an exception
        async def failing_session_update(notification) -> None:
            raise RuntimeError("Session update failed")

        mock_connection.sessionUpdate = failing_session_update

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test")
        # Should not raise, embedding failure is silently ignored
        result = await tool.run(args)

        assert result is not None
        assert result.stdout == "test output"


class TestAcpBashConfig:
    """Config-level defaults (default_timeout) are honored when args omit them."""

    @pytest.mark.asyncio
    async def test_run_uses_config_default_timeout(
        self, mock_connection: MockConnection
    ) -> None:
        custom_handle = MockTerminalHandle(
            terminal_id="config_timeout_terminal",
            wait_delay=0.01,  # Shorter than config timeout
        )
        mock_connection._terminal_handle = custom_handle

        tool = Bash(
            config=BashToolConfig(default_timeout=30),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="fast", timeout=None)
        result = await tool.run(args)

        # Should succeed with config timeout
        assert result.returncode == 0


class TestAcpBashCleanup:
    """The terminal handle is released on success, timeout, and release failure."""

    @pytest.mark.asyncio
    async def test_run_releases_terminal_on_success(
        self, mock_connection: MockConnection
    ) -> None:
        custom_handle = MockTerminalHandle(terminal_id="cleanup_terminal")
        mock_connection._terminal_handle = custom_handle

        release_called = False

        async def mock_release() -> None:
            nonlocal release_called
            release_called = True

        custom_handle.release = mock_release

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test")
        await tool.run(args)

        assert release_called

    @pytest.mark.asyncio
    async def test_run_releases_terminal_on_timeout(
        self, mock_connection: MockConnection
    ) -> None:
        # The handle will wait 2 seconds, but timeout is 1 second,
        # so asyncio.wait_for() will raise TimeoutError
        custom_handle = MockTerminalHandle(
            terminal_id="timeout_cleanup_terminal",
            wait_delay=2.0,  # Longer than the 1 second timeout
        )
        mock_connection._terminal_handle = custom_handle

        release_called = False

        async def mock_release() -> None:
            nonlocal release_called
            release_called = True

        custom_handle.release = mock_release

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="slow", timeout=1)
        # Timeout raises an error, but terminal should still be released
        try:
            await tool.run(args)
        except ToolError:
            pass

        assert release_called

    @pytest.mark.asyncio
    async def test_run_handles_release_failure(
        self, mock_connection: MockConnection
    ) -> None:
        custom_handle = MockTerminalHandle(terminal_id="release_failure_terminal")

        async def failing_release() -> None:
            raise RuntimeError("Release failed")

        custom_handle.release = failing_release
        mock_connection._terminal_handle = custom_handle

        tool = Bash(
            config=BashToolConfig(),
            state=AcpBashState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = BashArgs(command="test")
        # Should not raise, release failure is silently ignored
        result = await tool.run(args)

        assert result is not None
        assert result.stdout == "test output"
# ===== tests/acp/test_content.py (new file in this diff) =====
from __future__ import annotations

from pathlib import Path
from unittest.mock import patch

from acp import AgentSideConnection, NewSessionRequest, PromptRequest
from acp.schema import (
    EmbeddedResourceContentBlock,
    ResourceContentBlock,
    TextContentBlock,
    TextResourceContents,
)
import pytest

from tests.stubs.fake_backend import FakeBackend
from tests.stubs.fake_connection import FakeAgentSideConnection
from vibe.acp.acp_agent import VibeAcpAgent
from vibe.core.agent import Agent
from vibe.core.types import LLMChunk, LLMMessage, LLMUsage, Role


@pytest.fixture
def backend() -> FakeBackend:
    """A FakeBackend pre-loaded with one 'Hi' assistant chunk ending the turn."""
    backend = FakeBackend(
        results=[
            LLMChunk(
                message=LLMMessage(role=Role.assistant, content="Hi"),
                finish_reason="end_turn",
                usage=LLMUsage(prompt_tokens=1, completion_tokens=1),
            )
        ]
    )
    return backend


@pytest.fixture
def acp_agent(backend: FakeBackend) -> VibeAcpAgent:
    """A VibeAcpAgent whose inner VibeAgent is patched to use the fake backend.

    NOTE(review): patch(...).start() is never stopped — this leaks the patch
    across tests; consider patch.stopall() in a teardown. TODO confirm intent.
    """

    class PatchedAgent(Agent):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs, backend=backend)

    patch("vibe.acp.acp_agent.VibeAgent", side_effect=PatchedAgent).start()

    vibe_acp_agent: VibeAcpAgent | None = None

    def _create_agent(connection: AgentSideConnection) -> VibeAcpAgent:
        nonlocal vibe_acp_agent
        vibe_acp_agent = VibeAcpAgent(connection)
        return vibe_acp_agent

    FakeAgentSideConnection(_create_agent)
    return vibe_acp_agent  # pyright: ignore[reportReturnType]


class TestACPContent:
    """How ACP prompt content blocks are flattened into the user message."""

    @pytest.mark.asyncio
    async def test_text_content(
        self, acp_agent: VibeAcpAgent, backend: FakeBackend
    ) -> None:
        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        prompt_request = PromptRequest(
            prompt=[TextContentBlock(type="text", text="Say hi")],
            sessionId=session_response.sessionId,
        )

        response = await acp_agent.prompt(params=prompt_request)

        assert response.stopReason == "end_turn"
        user_message = next(
            (msg for msg in backend._requests_messages[0] if msg.role == Role.user),
            None,
        )
        assert user_message is not None, "User message not found in backend requests"
        assert user_message.content == "Say hi"

    @pytest.mark.asyncio
    async def test_resource_content(
        self, acp_agent: VibeAcpAgent, backend: FakeBackend
    ) -> None:
        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        prompt_request = PromptRequest(
            prompt=[
                TextContentBlock(type="text", text="What does this file do?"),
                EmbeddedResourceContentBlock(
                    type="resource",
                    resource=TextResourceContents(
                        uri="file:///home/my_file.py",
                        text="def hello():\n    print('Hello, world!')",
                        mimeType="text/x-python",
                    ),
                ),
            ],
            sessionId=session_response.sessionId,
        )

        response = await acp_agent.prompt(params=prompt_request)

        assert response.stopReason == "end_turn"
        user_message = next(
            (msg for msg in backend._requests_messages[0] if msg.role == Role.user),
            None,
        )
        assert user_message is not None, "User message not found in backend requests"
        expected_content = (
            "What does this file do?"
            + "\n\npath: file:///home/my_file.py"
            + "\ncontent: def hello():\n    print('Hello, world!')"
        )
        assert user_message.content == expected_content

    @pytest.mark.asyncio
    async def test_resource_link_content(
        self, acp_agent: VibeAcpAgent, backend: FakeBackend
    ) -> None:
        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        prompt_request = PromptRequest(
            prompt=[
                TextContentBlock(type="text", text="Analyze this resource"),
                ResourceContentBlock(
                    type="resource_link",
                    uri="file:///home/document.pdf",
                    name="document.pdf",
                    title="Important Document",
                    description="A PDF document containing project specifications",
                    mimeType="application/pdf",
                    size=1024,
                ),
            ],
            sessionId=session_response.sessionId,
        )

        response = await acp_agent.prompt(params=prompt_request)

        assert response.stopReason == "end_turn"
        user_message = next(
            (msg for msg in backend._requests_messages[0] if msg.role == Role.user),
            None,
        )
        assert user_message is not None, "User message not found in backend requests"
        expected_content = (
            "Analyze this resource"
            + "\n\nuri: file:///home/document.pdf"
            + "\nname: document.pdf"
            + "\ntitle: Important Document"
            + "\ndescription: A PDF document containing project specifications"
            + "\nmimeType: application/pdf"
            + "\nsize: 1024"
        )
        assert user_message.content == expected_content

    @pytest.mark.asyncio
    async def test_resource_link_minimal(
        self, acp_agent: VibeAcpAgent, backend: FakeBackend
    ) -> None:
        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        prompt_request = PromptRequest(
            prompt=[
                ResourceContentBlock(
                    type="resource_link",
                    uri="file:///home/minimal.txt",
                    name="minimal.txt",
                )
            ],
            sessionId=session_response.sessionId,
        )

        response = await acp_agent.prompt(params=prompt_request)

        assert response.stopReason == "end_turn"
        user_message = next(
            (msg for msg in backend._requests_messages[0] if msg.role == Role.user),
            None,
        )
        assert user_message is not None, "User message not found in backend requests"
        expected_content = "uri: file:///home/minimal.txt\nname: minimal.txt"
        assert user_message.content == expected_content


# ===== tests/acp/test_multi_session.py (new file in this diff) =====
from __future__ import annotations

import asyncio
from pathlib import Path
from typing import Any
from unittest.mock import patch
from uuid import uuid4

from acp import (
    PROTOCOL_VERSION,
    InitializeRequest,
    NewSessionRequest,
    PromptRequest,
    RequestError,
)
from acp.schema import TextContentBlock
import pytest
from pytest import raises

from tests.mock.utils import mock_llm_chunk
from tests.stubs.fake_backend import FakeBackend
from tests.stubs.fake_connection import FakeAgentSideConnection
from vibe.acp.acp_agent import VibeAcpAgent
from vibe.core.agent import Agent
from vibe.core.config import ModelConfig, VibeConfig
from vibe.core.types import Role


@pytest.fixture
def backend() -> FakeBackend:
    """An empty FakeBackend; tests load chunks via backend._chunks."""
    backend = FakeBackend()
    return backend


@pytest.fixture
def acp_agent(backend: FakeBackend) -> VibeAcpAgent:
    """A VibeAcpAgent with a patched inner agent bound to a fixed model config.

    NOTE(review): patch(...).start() is never stopped here either — TODO confirm.
    """
    config = VibeConfig(
        active_model="devstral-latest",
        models=[
            ModelConfig(
                name="devstral-latest", provider="mistral", alias="devstral-latest"
            )
        ],
    )

    class PatchedAgent(Agent):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.backend = backend
            self.config = config

    patch("vibe.acp.acp_agent.VibeAgent", side_effect=PatchedAgent).start()

    vibe_acp_agent: VibeAcpAgent | None = None

    def _create_agent(connection: Any) -> VibeAcpAgent:
        nonlocal vibe_acp_agent
        vibe_acp_agent = VibeAcpAgent(connection)
        return vibe_acp_agent

    FakeAgentSideConnection(_create_agent)
    return vibe_acp_agent  # pyright: ignore[reportReturnType]


class TestMultiSessionCore:
    """Session isolation: independent agents, unknown-session errors, concurrency."""

    @pytest.mark.asyncio
    async def test_different_sessions_use_different_agents(
        self, acp_agent: VibeAcpAgent
    ) -> None:
        await acp_agent.initialize(InitializeRequest(protocolVersion=PROTOCOL_VERSION))
        session1_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        session1 = acp_agent.sessions[session1_response.sessionId]
        session2_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        session2 = acp_agent.sessions[session2_response.sessionId]

        assert session1.id != session2.id
        # Each agent should be independent
        assert session1.agent is not session2.agent
        assert id(session1.agent) != id(session2.agent)

    @pytest.mark.asyncio
    async def test_error_on_nonexistent_session(self, acp_agent: VibeAcpAgent) -> None:
        await acp_agent.initialize(InitializeRequest(protocolVersion=PROTOCOL_VERSION))
        await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )

        fake_session_id = "fake-session-id-" + str(uuid4())

        with raises(RequestError) as exc_info:
            await acp_agent.prompt(
                PromptRequest(
                    sessionId=fake_session_id,
                    prompt=[TextContentBlock(type="text", text="Hello, world!")],
                )
            )

        assert isinstance(exc_info.value, RequestError)
        assert str(exc_info.value) == "Invalid params"

    @pytest.mark.asyncio
    async def test_simultaneous_message_processing(
        self, acp_agent: VibeAcpAgent, backend: FakeBackend
    ) -> None:
        await acp_agent.initialize(InitializeRequest(protocolVersion=PROTOCOL_VERSION))
        session1_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        session1 = acp_agent.sessions[session1_response.sessionId]
        session2_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        session2 = acp_agent.sessions[session2_response.sessionId]

        backend._chunks = [
            mock_llm_chunk(content="Response 1", finish_reason="stop"),
            mock_llm_chunk(content="Response 2", finish_reason="stop"),
        ]

        async def run_session1():
            await acp_agent.prompt(
                PromptRequest(
                    sessionId=session1.id,
                    prompt=[TextContentBlock(type="text", text="Prompt for session 1")],
                )
            )

        async def run_session2():
            await acp_agent.prompt(
                PromptRequest(
                    sessionId=session2.id,
                    prompt=[TextContentBlock(type="text", text="Prompt for session 2")],
                )
            )

        await asyncio.gather(run_session1(), run_session2())

        user_message1 = next(
            (msg for msg in session1.agent.messages if msg.role == Role.user), None
        )
        assert user_message1 is not None
        assert user_message1.content == "Prompt for session 1"
        assistant_message1 = next(
            (msg for msg in session1.agent.messages if msg.role == Role.assistant), None
        )
        assert assistant_message1 is not None
        assert assistant_message1.content == "Response 1"
        user_message2 = next(
            (msg for msg in session2.agent.messages if msg.role == Role.user), None
        )
        assert user_message2 is not None
        assert user_message2.content == "Prompt for session 2"
        assistant_message2 = next(
            (msg for msg in session2.agent.messages if msg.role == Role.assistant), None
        )
        assert assistant_message2 is not None
        assert assistant_message2.content == "Response 2"


# ===== tests/acp/test_new_session.py (new file in this diff) =====
from __future__ import annotations

from pathlib import Path
from unittest.mock import patch

from acp import AgentSideConnection, NewSessionRequest, SetSessionModelRequest
import pytest

from tests.stubs.fake_backend import FakeBackend
from tests.stubs.fake_connection import FakeAgentSideConnection
from vibe.acp.acp_agent import VibeAcpAgent
from vibe.acp.utils import VibeSessionMode
from vibe.core.agent import Agent
from vibe.core.config import ModelConfig, VibeConfig
from vibe.core.types import LLMChunk, LLMMessage, LLMUsage, Role


@pytest.fixture
def backend() -> FakeBackend:
    """A FakeBackend pre-loaded with one 'Hi' assistant chunk ending the turn."""
    backend = FakeBackend(
        results=[
            LLMChunk(
                message=LLMMessage(role=Role.assistant, content="Hi"),
                finish_reason="end_turn",
                usage=LLMUsage(prompt_tokens=1, completion_tokens=1),
            )
        ]
    )
    return backend


@pytest.fixture
def acp_agent(backend: FakeBackend) -> VibeAcpAgent:
    """A VibeAcpAgent with two configured models to exercise model listing.

    NOTE(review): patch(...).start() is never stopped — TODO confirm.
    """
    config = VibeConfig(
        active_model="devstral-latest",
        models=[
            ModelConfig(
                name="devstral-latest", provider="mistral", alias="devstral-latest"
            ),
            ModelConfig(
                name="devstral-small", provider="mistral", alias="devstral-small"
            ),
        ],
    )

    class PatchedAgent(Agent):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **{**kwargs, "backend": backend})
            self.config = config

    patch("vibe.acp.acp_agent.VibeAgent", side_effect=PatchedAgent).start()

    vibe_acp_agent: VibeAcpAgent | None = None

    def _create_agent(connection: AgentSideConnection) -> VibeAcpAgent:
        nonlocal vibe_acp_agent
        vibe_acp_agent = VibeAcpAgent(connection)
        return vibe_acp_agent

    FakeAgentSideConnection(_create_agent)
    return vibe_acp_agent  # pyright: ignore[reportReturnType]


class TestACPNewSession:
    """Shape of the newSession response: models, modes, and session wiring."""

    @pytest.mark.asyncio
    async def test_new_session_response_structure(
        self, acp_agent: VibeAcpAgent
    ) -> None:
        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )

        assert session_response.sessionId is not None
        acp_session = next(
            (
                s
                for s in acp_agent.sessions.values()
                if s.id == session_response.sessionId
            ),
            None,
        )
        assert acp_session is not None
        assert (
            acp_session.agent.interaction_logger.session_id
            == session_response.sessionId
        )

        assert session_response.sessionId == acp_session.agent.session_id

        assert session_response.models is not None
        assert session_response.models.currentModelId is not None
        assert session_response.models.availableModels is not None
        assert len(session_response.models.availableModels) == 2

        assert session_response.models.currentModelId == "devstral-latest"
        assert session_response.models.availableModels[0].modelId == "devstral-latest"
        assert session_response.models.availableModels[0].name == "devstral-latest"
        assert session_response.models.availableModels[1].modelId == "devstral-small"
        assert session_response.models.availableModels[1].name == "devstral-small"

        assert session_response.modes is not None
        assert session_response.modes.currentModeId is not None
        assert session_response.modes.availableModes is not None
        assert len(session_response.modes.availableModes) == 2

        assert session_response.modes.currentModeId == VibeSessionMode.APPROVAL_REQUIRED
        assert (
            session_response.modes.availableModes[0].id
            == VibeSessionMode.APPROVAL_REQUIRED
        )
        assert session_response.modes.availableModes[0].name == "Approval Required"
        assert (
            session_response.modes.availableModes[1].id == VibeSessionMode.AUTO_APPROVE
        )
        assert session_response.modes.availableModes[1].name == "Auto Approve"

    @pytest.mark.skip(reason="TODO: Fix this test")
    @pytest.mark.asyncio
    async def test_new_session_preserves_model_after_set_model(
        self, acp_agent: VibeAcpAgent
    ) -> None:
        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )
        session_id = session_response.sessionId

        assert session_response.models is not None
        assert session_response.models.currentModelId == "devstral-latest"

        response = await acp_agent.setSessionModel(
            SetSessionModelRequest(sessionId=session_id, modelId="devstral-small")
        )
        assert response is not None

        session_response = await acp_agent.newSession(
            NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[])
        )

        assert session_response.models is not None
        assert session_response.models.currentModelId == "devstral-small"
# ===== tests/acp/test_read_file.py (new file in this diff) =====
from __future__ import annotations

from pathlib import Path

from acp import ReadTextFileRequest, ReadTextFileResponse
import pytest

from vibe.acp.tools.builtins.read_file import AcpReadFileState, ReadFile
from vibe.core.tools.base import ToolError
from vibe.core.tools.builtins.read_file import (
    ReadFileArgs,
    ReadFileResult,
    ReadFileToolConfig,
)


class MockConnection:
    """Minimal ACP connection stub: serves file content and records requests."""

    def __init__(
        self,
        file_content: str = "line 1\nline 2\nline 3",
        read_error: Exception | None = None,
    ) -> None:
        # Fixed content returned by readTextFile (optionally sliced).
        self._file_content = file_content
        # When set, readTextFile raises this instead of returning content.
        self._read_error = read_error
        self._read_text_file_called = False
        self._session_update_called = False
        self._last_read_request: ReadTextFileRequest | None = None

    async def readTextFile(self, request: ReadTextFileRequest) -> ReadTextFileResponse:
        """Return the configured content, honoring 1-indexed line/limit slicing."""
        self._read_text_file_called = True
        self._last_read_request = request

        if self._read_error:
            raise self._read_error

        content = self._file_content
        if request.line is not None or request.limit is not None:
            lines = content.splitlines(keepends=True)
            start_line = (request.line or 1) - 1  # Convert to 0-indexed
            end_line = (
                start_line + request.limit if request.limit is not None else len(lines)
            )
            lines = lines[start_line:end_line]
            content = "".join(lines)

        return ReadTextFileResponse(content=content)

    async def sessionUpdate(self, notification) -> None:
        """Record that an embedding notification was sent; ignore the payload."""
        self._session_update_called = True


@pytest.fixture
def mock_connection() -> MockConnection:
    return MockConnection()


@pytest.fixture
def acp_read_file_tool(mock_connection: MockConnection, tmp_path: Path) -> ReadFile:
    """A ReadFile tool wired to the mock connection, rooted at tmp_path."""
    config = ReadFileToolConfig(workdir=tmp_path)
    state = AcpReadFileState.model_construct(
        connection=mock_connection,  # type: ignore[arg-type]
        session_id="test_session_123",
        tool_call_id="test_tool_call_456",
    )
    return ReadFile(config=config, state=state)


class TestAcpReadFileBasic:
    def test_get_name(self) -> None:
        assert ReadFile.get_name() == "read_file"


class TestAcpReadFileExecution:
    """Read paths: full file, offset/limit slicing, and error conditions."""

    @pytest.mark.asyncio
    async def test_run_success(
        self,
        acp_read_file_tool: ReadFile,
        mock_connection: MockConnection,
        tmp_path: Path,
    ) -> None:
        test_file = tmp_path / "test_file.txt"
        test_file.touch()
        args = ReadFileArgs(path=str(test_file))
        result = await acp_read_file_tool.run(args)

        assert isinstance(result, ReadFileResult)
        assert result.path == str(test_file)
        assert result.content == "line 1\nline 2\nline 3"
        assert result.lines_read == 3
        assert mock_connection._read_text_file_called
        assert mock_connection._session_update_called

        # Verify ReadTextFileRequest was created correctly
        request = mock_connection._last_read_request
        assert request is not None
        assert request.sessionId == "test_session_123"
        assert request.path == str(test_file)
        assert request.line is None  # offset=0 means no line specified
        assert request.limit is None

    @pytest.mark.asyncio
    async def test_run_with_offset(
        self, mock_connection: MockConnection, tmp_path: Path
    ) -> None:
        test_file = tmp_path / "test_file.txt"
        test_file.touch()
        tool = ReadFile(
            config=ReadFileToolConfig(workdir=tmp_path),
            state=AcpReadFileState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = ReadFileArgs(path=str(test_file), offset=1)
        result = await tool.run(args)

        assert result.lines_read == 2
        assert result.content == "line 2\nline 3"

        request = mock_connection._last_read_request
        assert request is not None
        assert request.line == 2  # offset=1 means line 2 (1-indexed)

    @pytest.mark.asyncio
    async def test_run_with_limit(
        self, mock_connection: MockConnection, tmp_path: Path
    ) -> None:
        test_file = tmp_path / "test_file.txt"
        test_file.touch()
        tool = ReadFile(
            config=ReadFileToolConfig(workdir=tmp_path),
            state=AcpReadFileState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = ReadFileArgs(path=str(test_file), limit=2)
        result = await tool.run(args)

        assert result.lines_read == 2
        assert result.content == "line 1\nline 2\n"

        request = mock_connection._last_read_request
        assert request is not None
        assert request.limit == 2

    @pytest.mark.asyncio
    async def test_run_with_offset_and_limit(
        self, mock_connection: MockConnection, tmp_path: Path
    ) -> None:
        test_file = tmp_path / "test_file.txt"
        test_file.touch()
        tool = ReadFile(
            config=ReadFileToolConfig(workdir=tmp_path),
            state=AcpReadFileState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = ReadFileArgs(path=str(test_file), offset=1, limit=1)
        result = await tool.run(args)

        assert result.lines_read == 1
        assert result.content == "line 2\n"

        request = mock_connection._last_read_request
        assert request is not None
        assert request.line == 2
        assert request.limit == 1

    @pytest.mark.asyncio
    async def test_run_read_error(
        self, mock_connection: MockConnection, tmp_path: Path
    ) -> None:
        mock_connection._read_error = RuntimeError("File not found")
        test_file = tmp_path / "test.txt"
        test_file.touch()
        tool = ReadFile(
            config=ReadFileToolConfig(workdir=tmp_path),
            state=AcpReadFileState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        args = ReadFileArgs(path=str(test_file))
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert str(exc_info.value) == f"Error reading {test_file}: File not found"

    @pytest.mark.asyncio
    async def test_run_without_connection(self, tmp_path: Path) -> None:
        test_file = tmp_path / "test.txt"
        test_file.touch()
        tool = ReadFile(
            config=ReadFileToolConfig(workdir=tmp_path),
            state=AcpReadFileState.model_construct(
                connection=None, session_id="test_session", tool_call_id="test_call"
            ),
        )

        args = ReadFileArgs(path=str(test_file))
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == "Connection not available in tool state. This tool can only be used within an ACP session."
        )

    @pytest.mark.asyncio
    async def test_run_without_session_id(self, tmp_path: Path) -> None:
        test_file = tmp_path / "test.txt"
        test_file.touch()
        mock_connection = MockConnection()
        tool = ReadFile(
            config=ReadFileToolConfig(workdir=tmp_path),
            state=AcpReadFileState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id=None,
                tool_call_id="test_call",
            ),
        )

        args = ReadFileArgs(path=str(test_file))
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == "Session ID not available in tool state. This tool can only be used within an ACP session."
        )


# ===== tests/acp/test_search_replace.py (new file in this diff) =====
from __future__ import annotations

from pathlib import Path

from acp import ReadTextFileRequest, ReadTextFileResponse, WriteTextFileRequest
import pytest

from vibe.acp.tools.builtins.search_replace import AcpSearchReplaceState, SearchReplace
from vibe.core.tools.base import ToolError
from vibe.core.tools.builtins.search_replace import (
    SearchReplaceArgs,
    SearchReplaceConfig,
    SearchReplaceResult,
)
from vibe.core.types import ToolCallEvent, ToolResultEvent


class MockConnection:
    """ACP connection stub recording read/write requests for search_replace."""

    def __init__(
        self,
        file_content: str = "original line 1\noriginal line 2\noriginal line 3",
        read_error: Exception | None = None,
        write_error: Exception | None = None,
    ) -> None:
        self._file_content = file_content
        self._read_error = read_error
        self._write_error = write_error
        self._read_text_file_called = False
        self._write_text_file_called = False
        self._session_update_called = False
        self._last_read_request: ReadTextFileRequest | None = None
        self._last_write_request: WriteTextFileRequest | None = None
        # Every write (main file + backups), in order.
        self._write_calls: list[WriteTextFileRequest] = []

    async def readTextFile(self, request: ReadTextFileRequest) -> ReadTextFileResponse:
        self._read_text_file_called = True
        self._last_read_request = request

        if self._read_error:
            raise self._read_error

        return ReadTextFileResponse(content=self._file_content)

    async def writeTextFile(self, request: WriteTextFileRequest) -> None:
        # NOTE: the request is recorded BEFORE the error is raised, so failed
        # writes still appear in _write_calls.
        self._write_text_file_called = True
        self._last_write_request = request
        self._write_calls.append(request)

        if self._write_error:
            raise self._write_error

    async def sessionUpdate(self, notification) -> None:
        self._session_update_called = True


@pytest.fixture
def mock_connection() -> MockConnection:
    return MockConnection()


@pytest.fixture
def acp_search_replace_tool(
    mock_connection: MockConnection, tmp_path: Path
) -> SearchReplace:
    """A SearchReplace tool wired to the mock connection, rooted at tmp_path."""
    config = SearchReplaceConfig(workdir=tmp_path)
    state = AcpSearchReplaceState.model_construct(
        connection=mock_connection,  # type: ignore[arg-type]
        session_id="test_session_123",
        tool_call_id="test_tool_call_456",
    )
    return SearchReplace(config=config, state=state)


class TestAcpSearchReplaceBasic:
    def test_get_name(self) -> None:
        assert SearchReplace.get_name() == "search_replace"


class TestAcpSearchReplaceExecution:
    """Applying SEARCH/REPLACE blocks through the ACP read/write round-trip.

    NOTE(review): this chunk of the diff is truncated mid-way through
    test_run_write_error; the remainder of the file lies beyond this view.
    """

    @pytest.mark.asyncio
    async def test_run_success(
        self,
        acp_search_replace_tool: SearchReplace,
        mock_connection: MockConnection,
        tmp_path: Path,
    ) -> None:
        test_file = tmp_path / "test_file.txt"
        test_file.write_text("original line 1\noriginal line 2\noriginal line 3")
        search_replace_content = (
            "<<<<<<< SEARCH\noriginal line 2\n=======\nmodified line 2\n>>>>>>> REPLACE"
        )
        args = SearchReplaceArgs(
            file_path=str(test_file), content=search_replace_content
        )
        result = await acp_search_replace_tool.run(args)

        assert isinstance(result, SearchReplaceResult)
        assert result.file == str(test_file)
        assert result.blocks_applied == 1
        assert mock_connection._read_text_file_called
        assert mock_connection._write_text_file_called
        assert mock_connection._session_update_called

        # Verify ReadTextFileRequest was created correctly
        read_request = mock_connection._last_read_request
        assert read_request is not None
        assert read_request.sessionId == "test_session_123"
        assert read_request.path == str(test_file)

        # Verify WriteTextFileRequest was created correctly
        write_request = mock_connection._last_write_request
        assert write_request is not None
        assert write_request.sessionId == "test_session_123"
        assert write_request.path == str(test_file)
        assert (
            write_request.content == "original line 1\nmodified line 2\noriginal line 3"
        )

    @pytest.mark.asyncio
    async def test_run_with_backup(
        self, mock_connection: MockConnection, tmp_path: Path
    ) -> None:
        config = SearchReplaceConfig(create_backup=True, workdir=tmp_path)
        tool = SearchReplace(
            config=config,
            state=AcpSearchReplaceState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        test_file = tmp_path / "test_file.txt"
        test_file.write_text("original line 1\noriginal line 2\noriginal line 3")
        search_replace_content = (
            "<<<<<<< SEARCH\noriginal line 1\n=======\nmodified line 1\n>>>>>>> REPLACE"
        )
        args = SearchReplaceArgs(
            file_path=str(test_file), content=search_replace_content
        )
        result = await tool.run(args)

        assert result.blocks_applied == 1
        # Should have written the main file and the backup
        assert len(mock_connection._write_calls) >= 1
        # Check if backup was written (it should be written to .bak file)
        assert sum(w.path.endswith(".bak") for w in mock_connection._write_calls) == 1

    @pytest.mark.asyncio
    async def test_run_read_error(
        self, mock_connection: MockConnection, tmp_path: Path
    ) -> None:
        mock_connection._read_error = RuntimeError("File not found")

        tool = SearchReplace(
            config=SearchReplaceConfig(workdir=tmp_path),
            state=AcpSearchReplaceState.model_construct(
                connection=mock_connection,  # type: ignore[arg-type]
                session_id="test_session",
                tool_call_id="test_call",
            ),
        )

        test_file = tmp_path / "test.txt"
        test_file.touch()
        search_replace_content = "<<<<<<< SEARCH\nold\n=======\nnew\n>>>>>>> REPLACE"
        args = SearchReplaceArgs(
            file_path=str(test_file), content=search_replace_content
        )
        with pytest.raises(ToolError) as exc_info:
            await tool.run(args)

        assert (
            str(exc_info.value)
            == f"Unexpected error reading {test_file}: File not found"
        )
= RuntimeError("Permission denied") + test_file = tmp_path / "test.txt" + test_file.touch() + mock_connection._file_content = "old" # Update mock to return correct content + + tool = SearchReplace( + config=SearchReplaceConfig(workdir=tmp_path), + state=AcpSearchReplaceState.model_construct( + connection=mock_connection, # type: ignore[arg-type] + session_id="test_session", + tool_call_id="test_call", + ), + ) + + search_replace_content = "<<<<<<< SEARCH\nold\n=======\nnew\n>>>>>>> REPLACE" + args = SearchReplaceArgs( + file_path=str(test_file), content=search_replace_content + ) + with pytest.raises(ToolError) as exc_info: + await tool.run(args) + + assert str(exc_info.value) == f"Error writing {test_file}: Permission denied" + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "connection,session_id,expected_error", + [ + ( + None, + "test_session", + "Connection not available in tool state. This tool can only be used within an ACP session.", + ), + ( + MockConnection(), + None, + "Session ID not available in tool state. 
This tool can only be used within an ACP session.", + ), + ], + ) + async def test_run_without_required_state( + self, + tmp_path: Path, + connection: MockConnection | None, + session_id: str | None, + expected_error: str, + ) -> None: + test_file = tmp_path / "test.txt" + test_file.touch() + tool = SearchReplace( + config=SearchReplaceConfig(workdir=tmp_path), + state=AcpSearchReplaceState.model_construct( + connection=connection, # type: ignore[arg-type] + session_id=session_id, + tool_call_id="test_call", + ), + ) + + search_replace_content = "<<<<<<< SEARCH\nold\n=======\nnew\n>>>>>>> REPLACE" + args = SearchReplaceArgs( + file_path=str(test_file), content=search_replace_content + ) + with pytest.raises(ToolError) as exc_info: + await tool.run(args) + + assert str(exc_info.value) == expected_error + + +class TestAcpSearchReplaceSessionUpdates: + def test_tool_call_session_update(self) -> None: + search_replace_content = ( + "<<<<<<< SEARCH\nold text\n=======\nnew text\n>>>>>>> REPLACE" + ) + event = ToolCallEvent( + tool_name="search_replace", + tool_call_id="test_call_123", + args=SearchReplaceArgs( + file_path="/tmp/test.txt", content=search_replace_content + ), + tool_class=SearchReplace, + ) + + update = SearchReplace.tool_call_session_update(event) + assert update is not None + assert update.sessionUpdate == "tool_call" + assert update.toolCallId == "test_call_123" + assert update.kind == "edit" + assert update.title is not None + assert update.content is not None + assert len(update.content) == 1 + assert update.content[0].type == "diff" + assert update.content[0].path == "/tmp/test.txt" + assert update.content[0].oldText == "old text" + assert update.content[0].newText == "new text" + assert update.locations is not None + assert len(update.locations) == 1 + assert update.locations[0].path == "/tmp/test.txt" + + def test_tool_call_session_update_invalid_args(self) -> None: + class InvalidArgs: + pass + + event = ToolCallEvent.model_construct( + 
tool_name="search_replace", + tool_call_id="test_call_123", + args=InvalidArgs(), # type: ignore[arg-type] + tool_class=SearchReplace, + ) + + update = SearchReplace.tool_call_session_update(event) + assert update is None + + def test_tool_result_session_update(self) -> None: + search_replace_content = ( + "<<<<<<< SEARCH\nold text\n=======\nnew text\n>>>>>>> REPLACE" + ) + result = SearchReplaceResult( + file="/tmp/test.txt", + blocks_applied=1, + lines_changed=1, + content=search_replace_content, + warnings=[], + ) + + event = ToolResultEvent( + tool_name="search_replace", + tool_call_id="test_call_123", + result=result, + tool_class=SearchReplace, + ) + + update = SearchReplace.tool_result_session_update(event) + assert update is not None + assert update.sessionUpdate == "tool_call_update" + assert update.toolCallId == "test_call_123" + assert update.status == "completed" + assert update.content is not None + assert len(update.content) == 1 + assert update.content[0].type == "diff" + assert update.content[0].path == "/tmp/test.txt" + assert update.content[0].oldText == "old text" + assert update.content[0].newText == "new text" + assert update.locations is not None + assert len(update.locations) == 1 + assert update.locations[0].path == "/tmp/test.txt" + + def test_tool_result_session_update_invalid_result(self) -> None: + class InvalidResult: + pass + + event = ToolResultEvent.model_construct( + tool_name="search_replace", + tool_call_id="test_call_123", + result=InvalidResult(), # type: ignore[arg-type] + tool_class=SearchReplace, + ) + + update = SearchReplace.tool_result_session_update(event) + assert update is None diff --git a/tests/acp/test_set_mode.py b/tests/acp/test_set_mode.py new file mode 100644 index 0000000..e04d42e --- /dev/null +++ b/tests/acp/test_set_mode.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +from pathlib import Path +from unittest.mock import patch + +from acp import AgentSideConnection, NewSessionRequest, 
SetSessionModeRequest +import pytest + +from tests.stubs.fake_backend import FakeBackend +from tests.stubs.fake_connection import FakeAgentSideConnection +from vibe.acp.acp_agent import VibeAcpAgent +from vibe.acp.utils import VibeSessionMode +from vibe.core.agent import Agent +from vibe.core.types import LLMChunk, LLMMessage, LLMUsage, Role + + +@pytest.fixture +def backend() -> FakeBackend: + backend = FakeBackend( + results=[ + LLMChunk( + message=LLMMessage(role=Role.assistant, content="Hi"), + finish_reason="end_turn", + usage=LLMUsage(prompt_tokens=1, completion_tokens=1), + ) + ] + ) + return backend + + +@pytest.fixture +def acp_agent(backend: FakeBackend) -> VibeAcpAgent: + class PatchedAgent(Agent): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs, backend=backend) + + patch("vibe.acp.acp_agent.VibeAgent", side_effect=PatchedAgent).start() + + vibe_acp_agent: VibeAcpAgent | None = None + + def _create_agent(connection: AgentSideConnection) -> VibeAcpAgent: + nonlocal vibe_acp_agent + vibe_acp_agent = VibeAcpAgent(connection) + return vibe_acp_agent + + FakeAgentSideConnection(_create_agent) + return vibe_acp_agent # pyright: ignore[reportReturnType] + + +class TestACPSetMode: + @pytest.mark.asyncio + async def test_set_mode_to_approval_required(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + acp_session.agent.auto_approve = True + acp_session.mode_id = VibeSessionMode.AUTO_APPROVE + + response = await acp_agent.setSessionMode( + SetSessionModeRequest( + sessionId=session_id, modeId=VibeSessionMode.APPROVAL_REQUIRED + ) + ) + + assert response is not None + assert acp_session.mode_id == VibeSessionMode.APPROVAL_REQUIRED + assert 
acp_session.agent.auto_approve is False + + @pytest.mark.asyncio + async def test_set_mode_to_AUTO_APPROVE(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + assert acp_session.mode_id == VibeSessionMode.APPROVAL_REQUIRED + assert acp_session.agent.auto_approve is False + + response = await acp_agent.setSessionMode( + SetSessionModeRequest( + sessionId=session_id, modeId=VibeSessionMode.AUTO_APPROVE + ) + ) + + assert response is not None + assert acp_session.mode_id == VibeSessionMode.AUTO_APPROVE + assert acp_session.agent.auto_approve is True + + @pytest.mark.asyncio + async def test_set_mode_invalid_mode_returns_none( + self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + initial_mode_id = acp_session.mode_id + initial_auto_approve = acp_session.agent.auto_approve + + response = await acp_agent.setSessionMode( + SetSessionModeRequest(sessionId=session_id, modeId="invalid-mode") + ) + + assert response is None + assert acp_session.mode_id == initial_mode_id + assert acp_session.agent.auto_approve == initial_auto_approve + + @pytest.mark.asyncio + async def test_set_mode_to_same_mode(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + initial_mode_id = 
VibeSessionMode.APPROVAL_REQUIRED + assert acp_session.mode_id == initial_mode_id + + response = await acp_agent.setSessionMode( + SetSessionModeRequest(sessionId=session_id, modeId=initial_mode_id) + ) + + assert response is not None + assert acp_session.mode_id == initial_mode_id + assert acp_session.agent.auto_approve is False + + @pytest.mark.asyncio + async def test_set_mode_with_empty_string(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + initial_mode_id = acp_session.mode_id + initial_auto_approve = acp_session.agent.auto_approve + + response = await acp_agent.setSessionMode( + SetSessionModeRequest(sessionId=session_id, modeId="") + ) + + assert response is None + assert acp_session.mode_id == initial_mode_id + assert acp_session.agent.auto_approve == initial_auto_approve diff --git a/tests/acp/test_set_model.py b/tests/acp/test_set_model.py new file mode 100644 index 0000000..d9a0ea5 --- /dev/null +++ b/tests/acp/test_set_model.py @@ -0,0 +1,308 @@ +from __future__ import annotations + +from pathlib import Path +from unittest.mock import patch + +from acp import AgentSideConnection, NewSessionRequest, SetSessionModelRequest +import pytest + +from tests.stubs.fake_backend import FakeBackend +from tests.stubs.fake_connection import FakeAgentSideConnection +from vibe.acp.acp_agent import VibeAcpAgent +from vibe.core.agent import Agent +from vibe.core.config import ModelConfig, VibeConfig +from vibe.core.types import LLMChunk, LLMMessage, LLMUsage, Role + + +@pytest.fixture +def backend() -> FakeBackend: + backend = FakeBackend( + results=[ + LLMChunk( + message=LLMMessage(role=Role.assistant, content="Hi"), + finish_reason="end_turn", + usage=LLMUsage(prompt_tokens=1, 
completion_tokens=1), + ) + ] + ) + return backend + + +@pytest.fixture +def acp_agent(backend: FakeBackend) -> VibeAcpAgent: + config = VibeConfig( + active_model="devstral-latest", + models=[ + ModelConfig( + name="devstral-latest", + provider="mistral", + alias="devstral-latest", + input_price=0.4, + output_price=2.0, + ), + ModelConfig( + name="devstral-small", + provider="mistral", + alias="devstral-small", + input_price=0.1, + output_price=0.3, + ), + ], + ) + + class PatchedAgent(Agent): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **{**kwargs, "backend": backend}) + self.config = config + try: + active_model = config.get_active_model() + self.stats.input_price_per_million = active_model.input_price + self.stats.output_price_per_million = active_model.output_price + except ValueError: + pass + + patch("vibe.acp.acp_agent.VibeAgent", side_effect=PatchedAgent).start() + + vibe_acp_agent: VibeAcpAgent | None = None + + def _create_agent(connection: AgentSideConnection) -> VibeAcpAgent: + nonlocal vibe_acp_agent + vibe_acp_agent = VibeAcpAgent(connection) + return vibe_acp_agent + + FakeAgentSideConnection(_create_agent) + return vibe_acp_agent # pyright: ignore[reportReturnType] + + +class TestACPSetModel: + @pytest.mark.asyncio + async def test_set_model_success(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + assert acp_session.agent.config.active_model == "devstral-latest" + + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="devstral-small") + ) + + assert response is not None + assert acp_session.agent.config.active_model == "devstral-small" + + @pytest.mark.asyncio + async def 
test_set_model_invalid_model_returns_none( + self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + initial_model = acp_session.agent.config.active_model + + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="non-existent-model") + ) + + assert response is None + assert acp_session.agent.config.active_model == initial_model + + @pytest.mark.asyncio + async def test_set_model_to_same_model(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + initial_model = "devstral-latest" + assert acp_session is not None + assert acp_session.agent.config.active_model == initial_model + + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId=initial_model) + ) + + assert response is not None + assert acp_session.agent.config.active_model == initial_model + + @pytest.mark.asyncio + async def test_set_model_saves_to_config(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + + with patch("vibe.acp.acp_agent.VibeConfig.save_updates") as mock_save: + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="devstral-small") + ) + + assert response is not None + mock_save.assert_called_once_with({"active_model": "devstral-small"}) + + @pytest.mark.asyncio + async def test_set_model_does_not_save_on_invalid_model( + 
self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + + with patch("vibe.acp.acp_agent.VibeConfig.save_updates") as mock_save: + response = await acp_agent.setSessionModel( + SetSessionModelRequest( + sessionId=session_id, modelId="non-existent-model" + ) + ) + + assert response is None + mock_save.assert_not_called() + + @pytest.mark.asyncio + async def test_set_model_with_empty_string(self, acp_agent: VibeAcpAgent) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + initial_model = acp_session.agent.config.active_model + + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="") + ) + + assert response is None + assert acp_session.agent.config.active_model == initial_model + + @pytest.mark.asyncio + async def test_set_model_updates_active_model( + self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + assert acp_session.agent.config.get_active_model().alias == "devstral-latest" + + await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="devstral-small") + ) + + assert acp_session.agent.config.get_active_model().alias == "devstral-small" + + @pytest.mark.asyncio + async def test_set_model_calls_reload_with_initial_messages( + self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + 
NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + with patch.object( + acp_session.agent, "reload_with_initial_messages" + ) as mock_reload: + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="devstral-small") + ) + + assert response is not None + mock_reload.assert_called_once() + call_args = mock_reload.call_args + assert call_args.kwargs["config"] is not None + assert call_args.kwargs["config"].active_model == "devstral-small" + + @pytest.mark.asyncio + async def test_set_model_preserves_conversation_history( + self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + user_msg = LLMMessage(role=Role.user, content="Hello") + assistant_msg = LLMMessage(role=Role.assistant, content="Hi there!") + acp_session.agent.messages.append(user_msg) + acp_session.agent.messages.append(assistant_msg) + + assert len(acp_session.agent.messages) == 3 + + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="devstral-small") + ) + + assert response is not None + assert len(acp_session.agent.messages) == 3 + assert acp_session.agent.messages[0].role == Role.system + assert acp_session.agent.messages[1].content == "Hello" + assert acp_session.agent.messages[2].content == "Hi there!" 
+ + @pytest.mark.asyncio + async def test_set_model_resets_stats_with_new_model_pricing( + self, acp_agent: VibeAcpAgent + ) -> None: + session_response = await acp_agent.newSession( + NewSessionRequest(cwd=str(Path.cwd()), mcpServers=[]) + ) + session_id = session_response.sessionId + acp_session = next( + (s for s in acp_agent.sessions.values() if s.id == session_id), None + ) + assert acp_session is not None + + initial_model = acp_session.agent.config.get_active_model() + initial_input_price = initial_model.input_price + initial_output_price = initial_model.output_price + + initial_stats_input = acp_session.agent.stats.input_price_per_million + initial_stats_output = acp_session.agent.stats.output_price_per_million + + assert acp_session.agent.stats.input_price_per_million == initial_input_price + assert acp_session.agent.stats.output_price_per_million == initial_output_price + + response = await acp_agent.setSessionModel( + SetSessionModelRequest(sessionId=session_id, modelId="devstral-small") + ) + + assert response is not None + + new_model = acp_session.agent.config.get_active_model() + new_input_price = new_model.input_price + new_output_price = new_model.output_price + + assert new_input_price != initial_input_price + assert new_output_price != initial_output_price + + assert acp_session.agent.stats.input_price_per_million == new_input_price + assert acp_session.agent.stats.output_price_per_million == new_output_price + + assert acp_session.agent.stats.input_price_per_million != initial_stats_input + assert acp_session.agent.stats.output_price_per_million != initial_stats_output diff --git a/tests/acp/test_write_file.py b/tests/acp/test_write_file.py new file mode 100644 index 0000000..d800542 --- /dev/null +++ b/tests/acp/test_write_file.py @@ -0,0 +1,269 @@ +from __future__ import annotations + +from pathlib import Path + +from acp import WriteTextFileRequest +import pytest + +from vibe.acp.tools.builtins.write_file import AcpWriteFileState, WriteFile 
+from vibe.core.tools.base import ToolError +from vibe.core.tools.builtins.write_file import ( + WriteFileArgs, + WriteFileConfig, + WriteFileResult, +) +from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class MockConnection: + def __init__( + self, write_error: Exception | None = None, file_exists: bool = False + ) -> None: + self._write_error = write_error + self._file_exists = file_exists + self._write_text_file_called = False + self._session_update_called = False + self._last_write_request: WriteTextFileRequest | None = None + + async def writeTextFile(self, request: WriteTextFileRequest) -> None: + self._write_text_file_called = True + self._last_write_request = request + + if self._write_error: + raise self._write_error + + async def sessionUpdate(self, notification) -> None: + self._session_update_called = True + + +@pytest.fixture +def mock_connection() -> MockConnection: + return MockConnection() + + +@pytest.fixture +def acp_write_file_tool(mock_connection: MockConnection, tmp_path: Path) -> WriteFile: + config = WriteFileConfig(workdir=tmp_path) + state = AcpWriteFileState.model_construct( + connection=mock_connection, # type: ignore[arg-type] + session_id="test_session_123", + tool_call_id="test_tool_call_456", + ) + return WriteFile(config=config, state=state) + + +class TestAcpWriteFileBasic: + def test_get_name(self) -> None: + assert WriteFile.get_name() == "write_file" + + +class TestAcpWriteFileExecution: + @pytest.mark.asyncio + async def test_run_success_new_file( + self, + acp_write_file_tool: WriteFile, + mock_connection: MockConnection, + tmp_path: Path, + ) -> None: + test_file = tmp_path / "test_file.txt" + args = WriteFileArgs(path=str(test_file), content="Hello, world!") + result = await acp_write_file_tool.run(args) + + assert isinstance(result, WriteFileResult) + assert result.path == str(test_file) + assert result.content == "Hello, world!" 
+ assert result.bytes_written == len(b"Hello, world!") + assert result.file_existed is False + assert mock_connection._write_text_file_called + assert mock_connection._session_update_called + + # Verify WriteTextFileRequest was created correctly + request = mock_connection._last_write_request + assert request is not None + assert request.sessionId == "test_session_123" + assert request.path == str(test_file) + assert request.content == "Hello, world!" + + @pytest.mark.asyncio + async def test_run_success_overwrite( + self, mock_connection: MockConnection, tmp_path: Path + ) -> None: + tool = WriteFile( + config=WriteFileConfig(workdir=tmp_path), + state=AcpWriteFileState.model_construct( + connection=mock_connection, # type: ignore[arg-type] + session_id="test_session", + tool_call_id="test_call", + ), + ) + + test_file = tmp_path / "existing_file.txt" + test_file.touch() + # Simulate existing file by checking in the core tool logic + # The ACP tool doesn't check existence, it's handled by the core tool + args = WriteFileArgs(path=str(test_file), content="New content", overwrite=True) + result = await tool.run(args) + + assert isinstance(result, WriteFileResult) + assert result.path == str(test_file) + assert result.content == "New content" + assert result.bytes_written == len(b"New content") + assert result.file_existed is True + assert mock_connection._write_text_file_called + assert mock_connection._session_update_called + + # Verify WriteTextFileRequest was created correctly + request = mock_connection._last_write_request + assert request is not None + assert request.sessionId == "test_session" + assert request.path == str(test_file) + assert request.content == "New content" + + @pytest.mark.asyncio + async def test_run_write_error( + self, mock_connection: MockConnection, tmp_path: Path + ) -> None: + mock_connection._write_error = RuntimeError("Permission denied") + + tool = WriteFile( + config=WriteFileConfig(workdir=tmp_path), + 
state=AcpWriteFileState.model_construct( + connection=mock_connection, # type: ignore[arg-type] + session_id="test_session", + tool_call_id="test_call", + ), + ) + + test_file = tmp_path / "test.txt" + args = WriteFileArgs(path=str(test_file), content="test") + with pytest.raises(ToolError) as exc_info: + await tool.run(args) + + assert str(exc_info.value) == f"Error writing {test_file}: Permission denied" + + @pytest.mark.asyncio + async def test_run_without_connection(self, tmp_path: Path) -> None: + tool = WriteFile( + config=WriteFileConfig(workdir=tmp_path), + state=AcpWriteFileState.model_construct( + connection=None, session_id="test_session", tool_call_id="test_call" + ), + ) + + args = WriteFileArgs(path=str(tmp_path / "test.txt"), content="test") + with pytest.raises(ToolError) as exc_info: + await tool.run(args) + + assert ( + str(exc_info.value) + == "Connection not available in tool state. This tool can only be used within an ACP session." + ) + + @pytest.mark.asyncio + async def test_run_without_session_id(self, tmp_path: Path) -> None: + mock_connection = MockConnection() + tool = WriteFile( + config=WriteFileConfig(workdir=tmp_path), + state=AcpWriteFileState.model_construct( + connection=mock_connection, # type: ignore[arg-type] + session_id=None, + tool_call_id="test_call", + ), + ) + + args = WriteFileArgs(path=str(tmp_path / "test.txt"), content="test") + with pytest.raises(ToolError) as exc_info: + await tool.run(args) + + assert ( + str(exc_info.value) + == "Session ID not available in tool state. This tool can only be used within an ACP session." 
+ ) + + +class TestAcpWriteFileSessionUpdates: + def test_tool_call_session_update(self) -> None: + event = ToolCallEvent( + tool_name="write_file", + tool_call_id="test_call_123", + args=WriteFileArgs(path="/tmp/test.txt", content="Hello"), + tool_class=WriteFile, + ) + + update = WriteFile.tool_call_session_update(event) + assert update is not None + assert update.sessionUpdate == "tool_call" + assert update.toolCallId == "test_call_123" + assert update.kind == "edit" + assert update.title is not None + assert update.content is not None + assert len(update.content) == 1 + assert update.content[0].type == "diff" + assert update.content[0].path == "/tmp/test.txt" + assert update.content[0].oldText is None + assert update.content[0].newText == "Hello" + assert update.locations is not None + assert len(update.locations) == 1 + assert update.locations[0].path == "/tmp/test.txt" + + def test_tool_call_session_update_invalid_args(self) -> None: + from vibe.core.types import FunctionCall, ToolCall + + class InvalidArgs: + pass + + event = ToolCallEvent.model_construct( + tool_name="write_file", + tool_call_id="test_call_123", + args=InvalidArgs(), # type: ignore[arg-type] + tool_class=WriteFile, + llm_tool_call=ToolCall( + function=FunctionCall(name="write_file", arguments="{}"), + type="function", + index=0, + ), + ) + + update = WriteFile.tool_call_session_update(event) + assert update is None + + def test_tool_result_session_update(self) -> None: + result = WriteFileResult( + path="/tmp/test.txt", content="Hello", bytes_written=5, file_existed=False + ) + + event = ToolResultEvent( + tool_name="write_file", + tool_call_id="test_call_123", + result=result, + tool_class=WriteFile, + ) + + update = WriteFile.tool_result_session_update(event) + assert update is not None + assert update.sessionUpdate == "tool_call_update" + assert update.toolCallId == "test_call_123" + assert update.status == "completed" + assert update.content is not None + assert len(update.content) == 1 
+ assert update.content[0].type == "diff" + assert update.content[0].path == "/tmp/test.txt" + assert update.content[0].oldText is None + assert update.content[0].newText == "Hello" + assert update.locations is not None + assert len(update.locations) == 1 + assert update.locations[0].path == "/tmp/test.txt" + + def test_tool_result_session_update_invalid_result(self) -> None: + class InvalidResult: + pass + + event = ToolResultEvent.model_construct( + tool_name="write_file", + tool_call_id="test_call_123", + result=InvalidResult(), # type: ignore[arg-type] + tool_class=WriteFile, + ) + + update = WriteFile.tool_result_session_update(event) + assert update is None diff --git a/tests/autocompletion/test_file_indexer.py b/tests/autocompletion/test_file_indexer.py new file mode 100644 index 0000000..dbd3b10 --- /dev/null +++ b/tests/autocompletion/test_file_indexer.py @@ -0,0 +1,231 @@ +from __future__ import annotations + +from collections.abc import Callable, Generator +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path +import time + +import pytest + +from vibe.core.autocompletion.file_indexer import FileIndexer + +# This suite runs against the real filesystem and watcher. A faked store/watcher +# split would be faster to unit-test, but given time constraints and the low churn +# expected for this feature, integration coverage was chosen as a trade-off. 
+ + +@pytest.fixture +def file_indexer() -> Generator[FileIndexer]: + indexer = FileIndexer() + yield indexer + indexer.shutdown() + + +def _wait_for(condition: Callable[[], bool], timeout=3.0) -> bool: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + if condition(): + return True + time.sleep(0.05) + return False + + +def test_updates_index_on_file_creation( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, file_indexer: FileIndexer +) -> None: + monkeypatch.chdir(tmp_path) + file_indexer.get_index(Path(".")) + + target = tmp_path / "new_file.py" + target.write_text("", encoding="utf-8") + + assert _wait_for( + lambda: any( + entry.rel == target.name for entry in file_indexer.get_index(Path(".")) + ) + ) + + +def test_updates_index_on_file_deletion( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, file_indexer: FileIndexer +) -> None: + monkeypatch.chdir(tmp_path) + target = tmp_path / "new_file.py" + target.write_text("", encoding="utf-8") + file_indexer.get_index(Path(".")) + + target.unlink() + + assert _wait_for( + lambda: all( + entry.rel != target.name for entry in file_indexer.get_index(Path(".")) + ) + ) + + +def test_updates_index_on_file_rename( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, file_indexer: FileIndexer +) -> None: + monkeypatch.chdir(tmp_path) + old_file = tmp_path / "old_name.py" + old_file.write_text("", encoding="utf-8") + file_indexer.get_index(Path(".")) + + new_file = tmp_path / "new_name.py" + old_file.rename(new_file) + + assert _wait_for( + lambda: all( + entry.rel != old_file.name for entry in file_indexer.get_index(Path(".")) + ) + and any( + entry.rel == new_file.name for entry in file_indexer.get_index(Path(".")) + ) + ) + + +def test_updates_index_on_folder_rename( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, file_indexer: FileIndexer +) -> None: + monkeypatch.chdir(tmp_path) + old_folder = tmp_path / "old_folder" + old_folder.mkdir() + number_of_files = 5 + file_names = 
[f"file{i}.py" for i in range(1, number_of_files + 1)] + old_file_paths = [old_folder / name for name in file_names] + for file_path in old_file_paths: + file_path.write_text("", encoding="utf-8") + file_indexer.get_index(Path(".")) + + new_folder = tmp_path / "new_folder" + old_folder.rename(new_folder) + + assert _wait_for( + lambda: ( + entries := file_indexer.get_index(Path(".")), + all(not entry.rel.startswith("old_folder/") for entry in entries) + and all( + any(entry.rel == f"new_folder/{name}" for entry in entries) + for name in file_names + ), + )[1] + ) + + +def test_updates_index_incrementally_by_default( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, file_indexer: FileIndexer +) -> None: + monkeypatch.chdir(tmp_path) + file_indexer.get_index(Path(".")) + + rebuilds_before = file_indexer.stats.rebuilds + incremental_before = file_indexer.stats.incremental_updates + + target = tmp_path / "stats_file.py" + target.write_text("", encoding="utf-8") + + assert _wait_for( + lambda: any( + entry.rel == target.name for entry in file_indexer.get_index(Path(".")) + ) + ) + + assert file_indexer.stats.rebuilds == rebuilds_before + assert file_indexer.stats.incremental_updates >= incremental_before + 1 + + +def test_rebuilds_index_when_mass_change_threshold_is_exceeded( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch +) -> None: + mass_change_threshold = 5 + # in an ideal world, we would use "threshold + 1", but in reality, we need to test with a + # number of files important enough to MAKE SURE that a batch of >= threshold events will be + # detected by the watcher + number_of_files = mass_change_threshold * 3 + monkeypatch.chdir(tmp_path) + indexer = FileIndexer(mass_change_threshold=mass_change_threshold) + try: + indexer.get_index(Path(".")) + rebuilds_before = indexer.stats.rebuilds + + ThreadPoolExecutor(max_workers=number_of_files).map( + lambda i: (tmp_path / f"bulk{i}.py").write_text("", encoding="utf-8"), + range(number_of_files), + ) + + assert 
_wait_for(lambda: len(indexer.get_index(Path("."))) == number_of_files) + # we do not assert that "incremental_updates" did not change, + # as the watcher potentially reported some batches of events that were + # smaller than the threshold + assert indexer.stats.rebuilds >= rebuilds_before + 1 + finally: + indexer.shutdown() + + +def test_switching_between_roots_restarts_index( + tmp_path: Path, + tmp_path_factory: pytest.TempPathFactory, + monkeypatch: pytest.MonkeyPatch, + file_indexer: FileIndexer, +) -> None: + first_root = tmp_path + second_root = tmp_path_factory.mktemp("second-root") + (first_root / "first.py").write_text("", encoding="utf-8") + (second_root / "second.py").write_text("", encoding="utf-8") + + monkeypatch.chdir(first_root) + assert _wait_for( + lambda: any( + entry.rel == "first.py" for entry in file_indexer.get_index(Path(".")) + ) + ) + + monkeypatch.chdir(second_root) + assert _wait_for( + lambda: all( + entry.rel != "first.py" for entry in file_indexer.get_index(Path(".")) + ) + ) + assert _wait_for( + lambda: any( + entry.rel == "second.py" for entry in file_indexer.get_index(Path(".")) + ) + ) + + +def test_watcher_failure_does_not_break_existing_index( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, file_indexer: FileIndexer +) -> None: + monkeypatch.chdir(tmp_path) + seed = tmp_path / "seed.py" + seed.write_text("", encoding="utf-8") + file_indexer.get_index(Path(".")) + + def boom(*_: object, **__: object) -> None: + raise RuntimeError("boom") + + monkeypatch.setattr(file_indexer._store, "apply_changes", boom) + + (tmp_path / "new_file.py").write_text("", encoding="utf-8") + + assert _wait_for( + lambda: ( + entries := file_indexer.get_index(Path(".")), + # new file was not added: watcher failed + all(entry.rel != "new_file.py" for entry in entries) + # but the existing index is still intact + and all(entry.rel == "seed.py" for entry in entries), + )[1] + ) + + +def test_shutdown_cleans_up_resources( + tmp_path: Path, monkeypatch: 
pytest.MonkeyPatch +) -> None: + monkeypatch.chdir(tmp_path) + (tmp_path / "test.txt").write_text("", encoding="utf-8") + file_indexer = FileIndexer() + file_indexer.get_index(Path(".")) + + file_indexer.shutdown() + assert file_indexer.get_index(Path(".")) == [] diff --git a/tests/autocompletion/test_fuzzy.py b/tests/autocompletion/test_fuzzy.py new file mode 100644 index 0000000..6622637 --- /dev/null +++ b/tests/autocompletion/test_fuzzy.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +from vibe.core.autocompletion.fuzzy import fuzzy_match + + +def test_empty_pattern_matches_anything() -> None: + result = fuzzy_match("", "any_text") + + assert result.matched is True + assert result.score == 0.0 + assert result.matched_indices == () + + +def test_matches_exact_prefix() -> None: + result = fuzzy_match("src/", "src/main.py") + + assert result.matched_indices == (0, 1, 2, 3) + + +def test_no_match_when_characters_are_out_of_order() -> None: + result = fuzzy_match("ms", "src/main.py") + + assert result.matched is False + + +def test_treats_consecutive_characters_as_subsequence() -> None: + result = fuzzy_match("main", "src/main.py") + + assert result.matched_indices == (4, 5, 6, 7) + + +def test_ignores_case() -> None: + result = fuzzy_match("SRC", "src/main.py") + + assert result.matched_indices == (0, 1, 2) + + +def test_treats_scattered_characters_as_subsequence() -> None: + result = fuzzy_match("sm", "src/main.py") + + assert result.matched_indices == (0, 4) + + +def test_treats_path_separator_as_word_boundary() -> None: + result = fuzzy_match("m", "src/main.py") + + assert result.matched_indices == (4,) + + +def test_prefers_word_boundary_matching_over_subsequence() -> None: + boundary_result = fuzzy_match("ma", "src/main.py") + subsequence_result = fuzzy_match("ma", "src/important.py") + + assert boundary_result.score > subsequence_result.score + + +def test_scores_exact_prefix_match_higher_than_consecutive_and_subsequence() -> None: + prefix_result = 
fuzzy_match("src", "src/main.py") + consecutive_result = fuzzy_match("main", "src/main.py") + subsequence_result = fuzzy_match("sm", "src/main.py") + + assert prefix_result.matched_indices == (0, 1, 2) + assert prefix_result.score > consecutive_result.score + assert prefix_result.score > subsequence_result.score + + +def test_finds_no_match_when_pattern_is_longer_than_entry() -> None: + result = fuzzy_match("very_long_pattern", "short") + + assert result.matched is False + + +def test_prefers_consecutive_match_over_subsequence() -> None: + consecutive = fuzzy_match("main", "src/main.py") + subsequence = fuzzy_match("mn", "src/main.py") + + assert consecutive.score > subsequence.score + + +def test_prefers_case_sensitive_match_over_case_insensitive() -> None: + case_match = fuzzy_match("Main", "src/Main.py") + case_insensitive_match = fuzzy_match("main", "src/Main.py") + + assert case_match.score > case_insensitive_match.score + + +def test_treats_uppercase_letter_as_word_boundary() -> None: + result = fuzzy_match("MP", "src/MainPy.py") + + assert result.matched_indices == (4, 8) + + +def test_favors_earlier_positions() -> None: + result = fuzzy_match("a", "banana") + + assert result.matched_indices == (1,) diff --git a/tests/autocompletion/test_path_completer_fuzzy.py b/tests/autocompletion/test_path_completer_fuzzy.py new file mode 100644 index 0000000..d2ef941 --- /dev/null +++ b/tests/autocompletion/test_path_completer_fuzzy.py @@ -0,0 +1,122 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +from vibe.core.autocompletion.completers import PathCompleter + + +@pytest.fixture() +def file_tree(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path: + (tmp_path / "src" / "utils").mkdir(parents=True) + (tmp_path / "src" / "main.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "models.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core").mkdir(parents=True) + (tmp_path / "src" / "core" / 
"logger.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "models.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "ports.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "sanitize.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "use_cases.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "validate.py").write_text("", encoding="utf-8") + (tmp_path / "README.md").write_text("", encoding="utf-8") + (tmp_path / ".env").write_text("", encoding="utf-8") + (tmp_path / "config").mkdir(parents=True) + (tmp_path / "config" / "settings.py").write_text("", encoding="utf-8") + (tmp_path / "config" / "database.py").write_text("", encoding="utf-8") + monkeypatch.chdir(tmp_path) + return tmp_path + + +def test_fuzzy_matches_subsequence_characters(file_tree: Path) -> None: + results = PathCompleter().get_completions("@sr", cursor_pos=3) + + assert "@src/" in results + + +def test_fuzzy_matches_consecutive_characters_higher(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/main", cursor_pos=9) + + assert "@src/main.py" in results + + +def test_fuzzy_matches_prefix_highest(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src", cursor_pos=4) + + assert results[0].startswith("@src") + + +def test_fuzzy_matches_across_directory_boundaries(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/main", cursor_pos=9) + + assert "@src/main.py" in results + + +def test_fuzzy_matches_case_insensitive(file_tree: Path) -> None: + completer = PathCompleter() + assert "@README.md" in completer.get_completions("@readme", cursor_pos=7) + assert "@README.md" in completer.get_completions("@README", cursor_pos=7) + + +def test_fuzzy_matches_word_boundaries_preferred(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/mp", cursor_pos=7) + + assert "@src/models.py" in results + + +def 
test_fuzzy_matches_empty_pattern_shows_all(file_tree: Path) -> None: + results = PathCompleter().get_completions("@", cursor_pos=1) + + assert "@README.md" in results + assert "@src/" in results + + +def test_fuzzy_matches_hidden_files_only_with_dot(file_tree: Path) -> None: + completer = PathCompleter() + assert "@.env" not in completer.get_completions("@e", cursor_pos=2) + assert "@.env" in completer.get_completions("@.", cursor_pos=2) + + +def test_fuzzy_matches_directories_and_files(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/", cursor_pos=5) + + assert any(r.endswith("/") for r in results) + assert any(not r.endswith("/") for r in results) + + +def test_fuzzy_matches_sorted_by_score(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/main", cursor_pos=9) + + assert results[0] == "@src/main.py" + + +def test_fuzzy_matches_nested_directories(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/core/l", cursor_pos=11) + + assert "@src/core/logger.py" in results + + +def test_fuzzy_matches_partial_filename(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/mo", cursor_pos=7) + + assert "@src/models.py" in results + + +def test_fuzzy_matches_multiple_files_with_same_pattern(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/m", cursor_pos=6) + + assert "@src/main.py" in results + assert "@src/models.py" in results + + +def test_fuzzy_matches_no_results_when_no_match(file_tree: Path) -> None: + completer = PathCompleter() + assert completer.get_completions("@xyz123", cursor_pos=7) == [] + + +def test_fuzzy_matches_directory_traversal(file_tree: Path) -> None: + results = PathCompleter().get_completions("@src/", cursor_pos=5) + + assert "@src/main.py" in results + assert "@src/core/" in results + assert "@src/utils/" in results diff --git a/tests/autocompletion/test_path_completer_recursive.py 
b/tests/autocompletion/test_path_completer_recursive.py new file mode 100644 index 0000000..185eba3 --- /dev/null +++ b/tests/autocompletion/test_path_completer_recursive.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +from vibe.core.autocompletion.completers import PathCompleter + + +@pytest.fixture() +def file_tree(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path: + (tmp_path / "vibe" / "acp").mkdir(parents=True) + (tmp_path / "vibe" / "acp" / "entrypoint.py").write_text("") + (tmp_path / "vibe" / "acp" / "agent.py").write_text("") + (tmp_path / "vibe" / "cli" / "autocompletion").mkdir(parents=True) + (tmp_path / "vibe" / "cli" / "autocompletion" / "fuzzy.py").write_text("") + (tmp_path / "vibe" / "cli" / "autocompletion" / "completers.py").write_text("") + (tmp_path / "tests" / "autocompletion").mkdir(parents=True) + (tmp_path / "tests" / "autocompletion" / "test_fuzzy.py").write_text("") + (tmp_path / "README.md").write_text("") + monkeypatch.chdir(tmp_path) + return tmp_path + + +def test_finds_files_recursively_by_filename(file_tree: Path) -> None: + results = PathCompleter().get_completions("@entryp", cursor_pos=7) + + assert results[0] == "@vibe/acp/entrypoint.py" + + +def test_finds_files_recursively_by_partial_path(file_tree: Path) -> None: + results = PathCompleter().get_completions("@acp/entry", cursor_pos=10) + + assert results[0] == "@vibe/acp/entrypoint.py" + + +def test_finds_files_recursively_with_subsequence(file_tree: Path) -> None: + results = PathCompleter().get_completions("@acp/ent", cursor_pos=9) + + assert results[0] == "@vibe/acp/entrypoint.py" + + +def test_finds_multiple_matches_recursively(file_tree: Path) -> None: + results = PathCompleter().get_completions("@fuzzy", cursor_pos=6) + + vibe_index = results.index("@vibe/cli/autocompletion/fuzzy.py") + test_index = results.index("@tests/autocompletion/test_fuzzy.py") + assert vibe_index < test_index + + +def 
test_prioritizes_exact_path_matches(file_tree: Path) -> None: + results = PathCompleter().get_completions("@vibe/acp/entrypoint", cursor_pos=20) + + assert results[0] == "@vibe/acp/entrypoint.py" + + +def test_finds_files_when_pattern_matches_directory_name(file_tree: Path) -> None: + results = PathCompleter().get_completions("@acp", cursor_pos=4) + + assert results == [ + "@vibe/acp/", + "@vibe/acp/agent.py", + "@vibe/acp/entrypoint.py", + "@vibe/cli/autocompletion/completers.py", + "@tests/autocompletion/", + "@tests/autocompletion/test_fuzzy.py", + "@vibe/cli/autocompletion/", + "@vibe/cli/autocompletion/fuzzy.py", + ] diff --git a/tests/autocompletion/test_path_completion_controller.py b/tests/autocompletion/test_path_completion_controller.py new file mode 100644 index 0000000..7378b76 --- /dev/null +++ b/tests/autocompletion/test_path_completion_controller.py @@ -0,0 +1,258 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest +from textual import events + +from vibe.cli.autocompletion.base import CompletionResult, CompletionView +from vibe.cli.autocompletion.path_completion import PathCompletionController +from vibe.core.autocompletion.completers import PathCompleter + + +class StubView(CompletionView): + def __init__(self) -> None: + self.suggestions: list[tuple[list[tuple[str, str]], int]] = [] + self.clears = 0 + self.replacements: list[tuple[int, int, str]] = [] + + def render_completion_suggestions( + self, suggestions: list[tuple[str, str]], selected_index: int + ) -> None: + self.suggestions.append((suggestions, selected_index)) + + def clear_completion_suggestions(self) -> None: + self.clears += 1 + + def replace_completion_range(self, start: int, end: int, replacement: str) -> None: + self.replacements.append((start, end, replacement)) + + +@pytest.fixture() +def file_tree(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path: + (tmp_path / "src" / "utils").mkdir(parents=True) + (tmp_path / "src" / 
"main.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core").mkdir(parents=True) + (tmp_path / "src" / "core" / "logger.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "models.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "ports.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "sanitize.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "use_cases.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "core" / "validate.py").write_text("", encoding="utf-8") + (tmp_path / "README.md").write_text("", encoding="utf-8") + (tmp_path / ".env").write_text("", encoding="utf-8") + monkeypatch.chdir(tmp_path) + return tmp_path + + +def make_controller( + max_entries_to_process: int | None = None, target_matches: int | None = None +) -> tuple[PathCompletionController, StubView]: + completer_kwargs = {} + if max_entries_to_process is not None: + completer_kwargs["max_entries_to_process"] = max_entries_to_process + if target_matches is not None: + completer_kwargs["target_matches"] = target_matches + + completer = PathCompleter(**completer_kwargs) + view = StubView() + controller = PathCompletionController(completer, view) + return controller, view + + +def test_lists_root_entries(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@", cursor_index=1) + + suggestions, selected = view.suggestions[-1] + assert selected == 0 + assert [alias for alias, _ in suggestions] == ["@README.md", "@src/"] + + +def test_suggests_hidden_entries_only_with_dot_prefix(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@.", cursor_index=2) + + suggestions, _ = view.suggestions[-1] + assert suggestions[0][0] == "@.env" + + +def test_lists_nested_entries_when_prefixing_with_folder_name(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@src/", cursor_index=5) + + 
suggestions, _ = view.suggestions[-1] + assert [alias for alias, _ in suggestions] == [ + "@src/core/", + "@src/main.py", + "@src/utils/", + ] + + +def test_resets_when_fragment_invalid(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@src", cursor_index=4) + assert view.suggestions + + controller.on_text_changed("@src foo", cursor_index=8) + assert view.clears == 1 + assert ( + controller.on_key(events.Key("tab", None), "@src foo", 8) + is CompletionResult.IGNORED + ) + + +def test_applies_selected_completion_on_tab_keycode(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@R", cursor_index=2) + result = controller.on_key(events.Key("tab", None), "@R", 2) + + assert result is CompletionResult.HANDLED + assert view.replacements == [(0, 2, "@README.md")] + assert view.clears == 1 + + +def test_applies_selected_completion_on_enter_keycode(file_tree: Path) -> None: + controller, view = make_controller() + controller.on_text_changed("@src/", cursor_index=5) + controller.on_key(events.Key("down", None), "@src/", 5) + + result = controller.on_key(events.Key("enter", None), "@src/", 5) + + assert result is CompletionResult.HANDLED + assert view.replacements == [(0, 5, "@src/main.py")] + assert view.clears == 1 + + +def test_navigates_and_cycles_across_suggestions(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@src/", cursor_index=5) + controller.on_key(events.Key("down", None), "@src/", 5) + suggestions, selected_index = view.suggestions[-1] + assert [alias for alias, _ in suggestions] == [ + "@src/core/", + "@src/main.py", + "@src/utils/", + ] + assert selected_index == 1 + controller.on_key(events.Key("up", None), "@src/", 5) + suggestions, selected_index = view.suggestions[-1] + assert selected_index == 0 + + controller.on_key(events.Key("down", None), "@src/", 5) + controller.on_key(events.Key("down", None), "@src/", 5) + 
suggestions, selected_index = view.suggestions[-1] + assert selected_index == 2 + + controller.on_key(events.Key("down", None), "@src/", 5) + suggestions, selected_index = view.suggestions[-1] + assert selected_index == 0 + + +def test_limits_suggestions_to_ten(file_tree: Path) -> None: + (file_tree / "src" / "core" / "extra").mkdir(parents=True) + [ + (file_tree / "src" / "core" / "extra" / f"extra_file_{i}.py").write_text( + "", encoding="utf-8" + ) + for i in range(1, 13) + ] + controller, view = make_controller() + + controller.on_text_changed("@src/core/extra/", cursor_index=16) + suggestions, selected_index = view.suggestions[-1] + assert len(suggestions) == 10 + assert [alias for alias, _ in suggestions] == [ + "@src/core/extra/extra_file_1.py", + "@src/core/extra/extra_file_10.py", + "@src/core/extra/extra_file_11.py", + "@src/core/extra/extra_file_12.py", + "@src/core/extra/extra_file_2.py", + "@src/core/extra/extra_file_3.py", + "@src/core/extra/extra_file_4.py", + "@src/core/extra/extra_file_5.py", + "@src/core/extra/extra_file_6.py", + "@src/core/extra/extra_file_7.py", + ] + assert selected_index == 0 + + +def test_does_not_handle_when_cursor_at_beginning_of_input(file_tree: Path) -> None: + controller, _ = make_controller() + + assert not controller.can_handle("@file", cursor_index=0) + assert not controller.can_handle("", cursor_index=0) + assert not controller.can_handle("hello@file", cursor_index=0) + + +def test_does_not_handle_when_cursor_before_or_at_the_at_symbol( + file_tree: Path, +) -> None: + controller, _ = make_controller() + + assert not controller.can_handle("@file", cursor_index=0) + assert not controller.can_handle("hello@file", cursor_index=5) + + +def test_does_handle_when_cursor_after_the_at_symbol_even_in_the_middle_of_the_input( + file_tree: Path, +) -> None: + controller, _ = make_controller() + + assert controller.can_handle("@file", cursor_index=1) + assert controller.can_handle("hello @file", cursor_index=7) + + +def 
test_lists_immediate_children_when_path_ends_with_slash(file_tree: Path) -> None: + controller, view = make_controller() + + controller.on_text_changed("@src/", cursor_index=5) + + suggestions, _ = view.suggestions[-1] + assert [alias for alias, _ in suggestions] == [ + "@src/core/", + "@src/main.py", + "@src/utils/", + ] + + +def test_respects_max_entries_to_process_limit(file_tree: Path) -> None: + for i in range(30): + (file_tree / f"file_{i:03d}.txt").write_text("", encoding="utf-8") + + controller, view = make_controller(max_entries_to_process=10) + + controller.on_text_changed("@", cursor_index=1) + + suggestions, _ = view.suggestions[-1] + assert len(suggestions) <= 10 + + +def test_respects_target_matches_limit_for_listing(file_tree: Path) -> None: + for i in range(30): + (file_tree / f"item_{i:03d}.txt").write_text("", encoding="utf-8") + + controller, view = make_controller(target_matches=5) + + controller.on_text_changed("@", cursor_index=1) + + suggestions, _ = view.suggestions[-1] + assert len(suggestions) <= 5 + + +def test_respects_target_matches_limit_for_fuzzy_search(file_tree: Path) -> None: + for i in range(30): + (file_tree / f"test_file_{i:03d}.py").write_text("", encoding="utf-8") + + controller, view = make_controller(target_matches=5) + + controller.on_text_changed("@test", cursor_index=5) + + suggestions, _ = view.suggestions[-1] + assert len(suggestions) <= 5 diff --git a/tests/autocompletion/test_path_prompt_transformer.py b/tests/autocompletion/test_path_prompt_transformer.py new file mode 100644 index 0000000..e874654 --- /dev/null +++ b/tests/autocompletion/test_path_prompt_transformer.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +from pathlib import Path + +from vibe.core.autocompletion.path_prompt_adapter import ( + DEFAULT_MAX_EMBED_BYTES, + render_path_prompt, +) + + +def test_treats_paths_to_files_as_embedded_resources(tmp_path: Path) -> None: + readme = tmp_path / "README.md" + readme.write_text("hello", 
encoding="utf-8") + src_dir = tmp_path / "src" + src_dir.mkdir() + main_py = src_dir / "main.py" + main_py.write_text("print('hi')", encoding="utf-8") + + rendered = render_path_prompt( + "Please review @README.md and @src/main.py", + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + expected = ( + f"Please review README.md and src/main.py\n\n" + f"{readme.as_uri()}\n```\nhello\n```\n\n" + f"{main_py.as_uri()}\n```\nprint('hi')\n```" + ) + assert rendered == expected + + +def test_treats_path_to_directory_as_resource_links(tmp_path: Path) -> None: + docs_dir = tmp_path / "docs" + docs_dir.mkdir() + + rendered = render_path_prompt( + "See @docs/ for details", + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + expected = f"See docs/ for details\n\nuri: {docs_dir.as_uri()}\nname: docs/" + assert rendered == expected + + +def test_keeps_emails_and_embeds_paths(tmp_path: Path) -> None: + readme = tmp_path / "README.md" + readme.write_text("hello", encoding="utf-8") + + rendered = render_path_prompt( + "Contact user@example.com about @README.md", + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + expected = ( + f"Contact user@example.com about README.md\n\n" + f"{readme.as_uri()}\n```\nhello\n```" + ) + assert rendered == expected + + +def test_ignores_nonexistent_paths(tmp_path: Path) -> None: + rendered = render_path_prompt( + "Missing @nope.txt here", + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + assert rendered == "Missing @nope.txt here" + + +def test_falls_back_to_link_for_binary_files(tmp_path: Path) -> None: + binary_path = tmp_path / "image.bin" + binary_path.write_bytes(b"\x00\x01\x02") + + rendered = render_path_prompt( + "Inspect @image.bin", base_dir=tmp_path, max_embed_bytes=DEFAULT_MAX_EMBED_BYTES + ) + + assert ( + rendered == f"Inspect image.bin\n\nuri: {binary_path.as_uri()}\nname: image.bin" + ) + + +def 
test_excludes_supposed_binary_files_quickly_before_reading_content( + tmp_path: Path, +) -> None: + zip_like = tmp_path / "archive.zip" + zip_like.write_text("text content inside but treated as binary", encoding="utf-8") + + rendered = render_path_prompt( + "Inspect @archive.zip", + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + assert ( + rendered + == f"Inspect archive.zip\n\nuri: {zip_like.as_uri()}\nname: archive.zip" + ) + + +def test_applies_max_embed_size_guard(tmp_path: Path) -> None: + large_file = tmp_path / "big.txt" + large_file.write_text("a" * 50, encoding="utf-8") + + rendered = render_path_prompt( + "Review @big.txt", base_dir=tmp_path, max_embed_bytes=10 + ) + + assert rendered == f"Review big.txt\n\nuri: {large_file.as_uri()}\nname: big.txt" + + +def test_parses_paths_with_special_characters_when_quoted(tmp_path: Path) -> None: + weird = tmp_path / "weird name(1).txt" + weird.write_text("odd", encoding="utf-8") + + rendered = render_path_prompt( + 'Open @"weird name(1).txt"', + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + assert rendered == f"Open weird name(1).txt\n\n{weird.as_uri()}\n```\nodd\n```" + + +def test_deduplicates_identical_paths(tmp_path: Path) -> None: + readme = tmp_path / "README.md" + readme.write_text("hello", encoding="utf-8") + + rendered = render_path_prompt( + "See @README.md and again @README.md", + base_dir=tmp_path, + max_embed_bytes=DEFAULT_MAX_EMBED_BYTES, + ) + + assert ( + rendered + == f"See README.md and again README.md\n\n{readme.as_uri()}\n```\nhello\n```" + ) diff --git a/tests/autocompletion/test_slash_command_controller.py b/tests/autocompletion/test_slash_command_controller.py new file mode 100644 index 0000000..f836a63 --- /dev/null +++ b/tests/autocompletion/test_slash_command_controller.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +from typing import NamedTuple + +from textual import events + +from vibe.cli.autocompletion.base import 
CompletionResult, CompletionView +from vibe.cli.autocompletion.slash_command import SlashCommandController +from vibe.core.autocompletion.completers import CommandCompleter + + +class Suggestion(NamedTuple): + alias: str + description: str + + +class SuggestionEvent(NamedTuple): + suggestions: list[Suggestion] + selected_index: int + + +class Replacement(NamedTuple): + start: int + end: int + replacement: str + + +class StubView(CompletionView): + def __init__(self) -> None: + self.suggestion_events: list[SuggestionEvent] = [] + self.reset_count = 0 + self.replacements: list[Replacement] = [] + + def render_completion_suggestions( + self, suggestions: list[tuple[str, str]], selected_index: int + ) -> None: + typed = [Suggestion(alias, description) for alias, description in suggestions] + self.suggestion_events.append(SuggestionEvent(typed, selected_index)) + + def clear_completion_suggestions(self) -> None: + self.reset_count += 1 + + def replace_completion_range(self, start: int, end: int, replacement: str) -> None: + self.replacements.append(Replacement(start, end, replacement)) + + +def key_event(key: str) -> events.Key: + return events.Key(key, character=None) + + +def make_controller( + *, prefix: str | None = None +) -> tuple[SlashCommandController, StubView]: + commands = [ + ("/config", "Show current configuration"), + ("/compact", "Compact history"), + ("/help", "Display help"), + ("/config", "Override description"), + ("/summarize", "Summarize history"), + ("/logpath", "Show log path"), + ("/exit", "Exit application"), + ("/vim", "Toggle vim keybindings"), + ] + completer = CommandCompleter(commands) + view = StubView() + controller = SlashCommandController(completer, view) + + if prefix is not None: + controller.on_text_changed(prefix, cursor_index=len(prefix)) + view.suggestion_events.clear() + + return controller, view + + +def test_on_text_change_emits_matching_suggestions_in_insertion_order_and_ignores_duplicates() -> ( + None +): + controller, view 
= make_controller(prefix="/c") + + controller.on_text_changed("/c", cursor_index=2) + + suggestions, selected = view.suggestion_events[-1] + assert suggestions == [ + Suggestion("/config", "Override description"), + Suggestion("/compact", "Compact history"), + ] + assert selected == 0 + + +def test_on_text_change_filters_suggestions_case_insensitively() -> None: + controller, view = make_controller(prefix="/c") + + controller.on_text_changed("/CO", cursor_index=3) + + suggestions, _ = view.suggestion_events[-1] + assert [suggestion.alias for suggestion in suggestions] == ["/config", "/compact"] + + +def test_on_text_change_clears_suggestions_when_no_matches() -> None: + controller, view = make_controller(prefix="/c") + + controller.on_text_changed("/c", cursor_index=2) + controller.on_text_changed("config", cursor_index=6) + + assert view.reset_count >= 1 + + +def test_on_text_change_limits_the_number_of_results_to_five_and_preserve_insertion_order() -> ( + None +): + controller, view = make_controller(prefix="/") + + controller.on_text_changed("/", cursor_index=1) + + suggestions, selected_index = view.suggestion_events[-1] + assert len(suggestions) == 5 + assert [suggestion.alias for suggestion in suggestions] == [ + "/config", + "/compact", + "/help", + "/summarize", + "/logpath", + ] + + +def test_on_key_tab_applies_selected_completion() -> None: + controller, view = make_controller(prefix="/c") + + result = controller.on_key(key_event("tab"), text="/c", cursor_index=2) + + assert result is CompletionResult.HANDLED + assert view.replacements == [Replacement(0, 2, "/config")] + assert view.reset_count == 1 + + +def test_on_key_down_and_up_cycle_selection() -> None: + controller, view = make_controller(prefix="/c") + + controller.on_key(key_event("down"), text="/c", cursor_index=2) + suggestions, selected_index = view.suggestion_events[-1] + assert selected_index == 1 + + controller.on_key(key_event("down"), text="/c", cursor_index=2) + suggestions, 
selected_index = view.suggestion_events[-1] + assert selected_index == 0 + + controller.on_key(key_event("up"), text="/c", cursor_index=2) + suggestions, selected_index = view.suggestion_events[-1] + assert selected_index == 1 + assert [suggestion.alias for suggestion in suggestions] == ["/config", "/compact"] + + +def test_on_key_enter_submits_selected_completion() -> None: + controller, view = make_controller(prefix="/c") + + controller.on_key(key_event("down"), text="/c", cursor_index=2) + + result = controller.on_key(key_event("enter"), text="/c", cursor_index=2) + + assert result is CompletionResult.SUBMIT + assert view.replacements == [Replacement(0, 2, "/compact")] + assert view.reset_count == 1 diff --git a/tests/autocompletion/test_ui_chat_autocompletion.py b/tests/autocompletion/test_ui_chat_autocompletion.py new file mode 100644 index 0000000..e5c6d84 --- /dev/null +++ b/tests/autocompletion/test_ui_chat_autocompletion.py @@ -0,0 +1,306 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest +from textual.content import Content +from textual.style import Style +from textual.widgets import Markdown + +from vibe.cli.textual_ui.app import VibeApp +from vibe.cli.textual_ui.widgets.chat_input.completion_popup import CompletionPopup +from vibe.cli.textual_ui.widgets.chat_input.container import ChatInputContainer +from vibe.core.config import SessionLoggingConfig, VibeConfig + + +@pytest.fixture +def vibe_config() -> VibeConfig: + return VibeConfig(session_logging=SessionLoggingConfig(enabled=False)) + + +@pytest.fixture +def vibe_app(vibe_config: VibeConfig) -> VibeApp: + return VibeApp(config=vibe_config) + + +@pytest.mark.asyncio +async def test_popup_appears_with_matching_suggestions(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"/sum") + + popup_content = str(popup.render()) + assert 
popup.styles.display == "block" + assert "/summarize" in popup_content + assert "Compact conversation history by summarizing" in popup_content + assert chat_input.value == "/sum" + + +@pytest.mark.asyncio +async def test_popup_hides_when_input_cleared(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"/c") + await pilot.press("backspace", "backspace") + + assert popup.styles.display == "none" + + +@pytest.mark.asyncio +async def test_pressing_tab_writes_selected_command_and_keeps_popup_visible( + vibe_app: VibeApp, +) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"/co") + await pilot.press("tab") + + assert chat_input.value == "/config" + assert popup.styles.display == "block" + + +def ensure_selected_command(popup: CompletionPopup, expected_alias: str) -> None: + renderable = popup.render() + assert isinstance(renderable, Content) + content = str(renderable) + + selected_aliases: list[str] = [] + for span in renderable.spans: + style = span.style + if isinstance(style, Style) and style.reverse: + alias_text = content[span.start : span.end].strip() + alias = alias_text.split()[0] if alias_text else "" + selected_aliases.append(alias) + + assert len(selected_aliases) == 1 + assert selected_aliases[0] == expected_alias + + +@pytest.mark.asyncio +async def test_arrow_navigation_updates_selected_suggestion(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"/c") + + ensure_selected_command(popup, "/cfg") + await pilot.press("down") + ensure_selected_command(popup, "/config") + await pilot.press("up") + ensure_selected_command(popup, "/cfg") + + +@pytest.mark.asyncio +async def test_arrow_navigation_cycles_through_suggestions(vibe_app: VibeApp) -> None: + async with 
vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"/st") + + ensure_selected_command(popup, "/stats") + await pilot.press("down") + ensure_selected_command(popup, "/status") + await pilot.press("up") + ensure_selected_command(popup, "/stats") + + +@pytest.mark.asyncio +async def test_pressing_enter_submits_selected_command_and_hides_popup( + vibe_app: VibeApp, +) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"/hel") # typos:disable-line + await pilot.press("enter") + + assert chat_input.value == "" + assert popup.styles.display == "none" + message = vibe_app.query_one(".user-command-message") + message_content = message.query_one(Markdown) + assert "Show help message" in message_content.source + + +@pytest.fixture() +def file_tree(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path: + (tmp_path / "src" / "utils").mkdir(parents=True) + (tmp_path / "src" / "utils" / "config.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "utils" / "database.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "utils" / "error_handling.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "utils" / "logger.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "utils" / "sanitize.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "utils" / "validate.py").write_text("", encoding="utf-8") + (tmp_path / "src" / "main.py").write_text("", encoding="utf-8") + (tmp_path / "vibe" / "acp").mkdir(parents=True) + (tmp_path / "vibe" / "acp" / "entrypoint.py").write_text("", encoding="utf-8") + (tmp_path / "vibe" / "acp" / "agent.py").write_text("", encoding="utf-8") + (tmp_path / "README.md").write_text("", encoding="utf-8") + (tmp_path / ".env").write_text("", encoding="utf-8") + monkeypatch.chdir(tmp_path) + return tmp_path + + +@pytest.mark.asyncio +async def 
test_path_completion_popup_lists_files_and_directories( + vibe_app: VibeApp, file_tree: Path +) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"@s") + + popup_content = str(popup.render()) + assert "@src/" in popup_content + assert popup.styles.display == "block" + + +@pytest.mark.asyncio +async def test_path_completion_popup_shows_up_to_ten_results( + vibe_app: VibeApp, file_tree: Path +) -> None: + async with vibe_app.run_test() as pilot: + (file_tree / "src" / "core" / "extra").mkdir(parents=True) + [ + (file_tree / "src" / "core" / "extra" / f"extra_file_{i}.py").write_text( + "", encoding="utf-8" + ) + for i in range(1, 13) + ] + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"@src/core/extra/") + + popup_content = str(popup.render()) + assert "@src/core/extra/extra_file_1.py" in popup_content + assert "@src/core/extra/extra_file_10.py" in popup_content + assert "@src/core/extra/extra_file_11.py" in popup_content + assert "@src/core/extra/extra_file_12.py" in popup_content + assert "@src/core/extra/extra_file_2.py" in popup_content + assert "@src/core/extra/extra_file_3.py" in popup_content + assert "@src/core/extra/extra_file_4.py" in popup_content + assert "@src/core/extra/extra_file_5.py" in popup_content + assert "@src/core/extra/extra_file_6.py" in popup_content + assert "@src/core/extra/extra_file_7.py" in popup_content + assert popup.styles.display == "block" + + +@pytest.mark.asyncio +async def test_pressing_tab_writes_selected_path_name_and_hides_popup( + vibe_app: VibeApp, file_tree: Path +) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"Print @REA") + await pilot.press("tab") + + assert chat_input.value == "Print @README.md " + assert popup.styles.display == "none" + + +@pytest.mark.asyncio +async def 
test_pressing_enter_writes_selected_path_name_and_hides_popup( + vibe_app: VibeApp, file_tree: Path +) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"Print @src/m") + await pilot.press("enter") + + assert chat_input.value == "Print @src/main.py " + assert popup.styles.display == "none" + + +@pytest.mark.asyncio +async def test_fuzzy_matches_subsequence_characters( + file_tree: Path, vibe_app: VibeApp +) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"@src/utils/handling") + + popup_content = str(popup.render()) + assert "@src/utils/error_handling.py" in popup_content + assert popup.styles.display == "block" + + +@pytest.mark.asyncio +async def test_fuzzy_matches_word_boundaries( + file_tree: Path, vibe_app: VibeApp +) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"@src/utils/eh") + + popup_content = str(popup.render()) + assert "@src/utils/error_handling.py" in popup_content + assert popup.styles.display == "block" + + +@pytest.mark.asyncio +async def test_finds_files_recursively_by_filename( + file_tree: Path, vibe_app: VibeApp +) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"@entryp") + + popup_content = str(popup.render()) + assert "@vibe/acp/entrypoint.py" in popup_content + assert popup.styles.display == "block" + + +@pytest.mark.asyncio +async def test_finds_files_recursively_with_partial_path( + file_tree: Path, vibe_app: VibeApp +) -> None: + async with vibe_app.run_test() as pilot: + popup = vibe_app.query_one(CompletionPopup) + + await pilot.press(*"@acp/entry") + + popup_content = str(popup.render()) + assert "@vibe/acp/entrypoint.py" in popup_content + assert popup.styles.display == "block" + + 
+@pytest.mark.asyncio +async def test_does_not_trigger_completion_when_navigating_history( + file_tree: Path, vibe_app: VibeApp +) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + popup = vibe_app.query_one(CompletionPopup) + message_with_path = "Check @src/m" + message_to_fill_history = "Yet another message to fill history" + + await pilot.press(*message_with_path) + await pilot.press("tab", "enter") + await pilot.press(*message_to_fill_history) + await pilot.press("enter") + await pilot.press("up", "up") + assert chat_input.value == "Check @src/main.py" + await pilot.pause(0.2) + # ensure popup is hidden - user was navigating history: we don't want to interrupt + assert popup.styles.display == "none" + await pilot.press("down") + await pilot.pause(0.1) + assert popup.styles.display == "none" + # get back to the message with path completion; ensure again + await pilot.press("up") + await pilot.pause(0.1) + assert chat_input.value == "Check @src/main.py" + await pilot.pause(0.2) + assert popup.styles.display == "none" diff --git a/tests/backend/__init__.py b/tests/backend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/backend/data/__init__.py b/tests/backend/data/__init__.py new file mode 100644 index 0000000..0afb089 --- /dev/null +++ b/tests/backend/data/__init__.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +Url = str +JsonResponse = dict +ResultData = dict +Chunk = bytes diff --git a/tests/backend/data/fireworks.py b/tests/backend/data/fireworks.py new file mode 100644 index 0000000..5ecb6eb --- /dev/null +++ b/tests/backend/data/fireworks.py @@ -0,0 +1,183 @@ +from __future__ import annotations + +from tests.backend.data import Chunk, JsonResponse, ResultData, Url + +SIMPLE_CONVERSATION_PARAMS: list[tuple[Url, JsonResponse, ResultData]] = [ + ( + "https://api.fireworks.ai", + { + "id": "fake_id_1234", + "object": "chat.completion", + "created": 1234567890, + 
"model": "accounts/fireworks/models/glm-4p5", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Some content", + "reasoning_content": "Some reasoning content", + }, + "finish_reason": "stop", + } + ], + "usage": { + "prompt_tokens": 100, + "total_tokens": 300, + "completion_tokens": 200, + "prompt_tokens_details": {"cached_tokens": 0}, + }, + }, + { + "message": "Some content", + "finish_reason": "stop", + "usage": { + "prompt_tokens": 100, + "total_tokens": 300, + "completion_tokens": 200, + }, + }, + ) +] + +TOOL_CONVERSATION_PARAMS: list[tuple[Url, JsonResponse, ResultData]] = [ + ( + "https://api.fireworks.ai", + { + "id": "fake_id_1234", + "object": "chat.completion", + "created": 1234567890, + "model": "accounts/fireworks/models/glm-4p5", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "reasoning_content": "Some reasoning content", + "tool_calls": [ + { + "index": 0, + "id": "fake_id_5678", + "type": "function", + "function": { + "name": "some_tool", + "arguments": '{"some_argument": "some_argument_value"}', + }, + "name": None, + } + ], + }, + "finish_reason": "tool_calls", + } + ], + "usage": { + "prompt_tokens": 100, + "completion_tokens": 200, + "prompt_tokens_details": {"cached_tokens": 0}, + }, + }, + { + "message": "", + "finish_reason": "tool_calls", + "tool_calls": [ + { + "name": "some_tool", + "arguments": '{"some_argument": "some_argument_value"}', + "index": 0, + } + ], + "usage": {"prompt_tokens": 100, "completion_tokens": 200}, + }, + ) +] + +STREAMED_SIMPLE_CONVERSATION_PARAMS: list[tuple[Url, list[Chunk], list[ResultData]]] = [ + ( + "https://api.fireworks.ai", + [ + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"accounts/fireworks/models/glm-4p5","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}],"usage":null}', + rb'data: 
{"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"accounts/fireworks/models/glm-4p5","choices":[{"index":0,"delta":{"reasoning_content":"Some reasoning content"},"finish_reason":null}],"usage":null}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"accounts/fireworks/models/glm-4p5","choices":[{"index":0,"delta":{"content":"Some content"},"finish_reason":null}],"usage":null}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"accounts/fireworks/models/glm-4p5","choices":[{"index":0,"delta":{},"finish_reason":"stop"}],"usage":{"prompt_tokens":100,"total_tokens":300,"completion_tokens":200,"prompt_tokens_details":{"cached_tokens":0}}}', + rb"data: [DONE]", + ], + [ + { + "message": "", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "Some content", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": "stop", + "usage": {"prompt_tokens": 100, "completion_tokens": 200}, + }, + ], + ) +] + + +STREAMED_TOOL_CONVERSATION_PARAMS: list[tuple[Url, list[Chunk], list[ResultData]]] = [ + ( + "https://api.fireworks.ai", + [ + rb'data: {"id": "fake_id_1234","object": "chat.completion.chunk","created": 1234567890,"model": "accounts/fireworks/models/glm-4p5","choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": null}],"usage": null}', + rb'data: {"id": "fake_id_1234","object": "chat.completion.chunk","created": 1234567890,"model": "accounts/fireworks/models/glm-4p5","choices": [{"index": 0,"delta": {"reasoning_content": "Some reasoning content"},"finish_reason": null}],"usage": null}', + rb'data: {"id": "fake_id_1234","object": "chat.completion.chunk","created": 1234567890,"model": 
"accounts/fireworks/models/glm-4p5","choices": [{"index": 0,"delta": {"content": "Some content"},"finish_reason": null}],"usage": null}', + rb'data: {"id": "fake_id_1234","object": "chat.completion.chunk","created": 1234567890,"model": "accounts/fireworks/models/glm-4p5","choices": [{"index": 0,"delta": {"tool_calls": [{"index": 0,"id": "fake_id_151617","type": "function","function": {"name": "some_tool"}}]},"finish_reason": null}],"usage": null}', + rb'data: {"id": "fake_id_1234","object": "chat.completion.chunk","created": 1234567890,"model": "accounts/fireworks/models/glm-4p5","choices": [{"index": 0,"delta": {"tool_calls": [{"index": 0,"id": null,"type": "function","function": {"arguments": "{\"some_argument\": \"some_arguments_value\"}"}}]},"finish_reason": null}],"usage": null}', + rb'data: {"id": "fake_id_1234","object": "chat.completion.chunk","created": 1234567890,"model": "accounts/fireworks/models/glm-4p5","choices": [{"index": 0, "delta": {}, "finish_reason": "tool_calls"}],"usage": {"prompt_tokens": 100,"total_tokens": 300,"completion_tokens": 200,"prompt_tokens_details": {"cached_tokens": 190}}}', + rb"data: [DONE]", + ], + [ + { + "message": "", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "Some content", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "tool_calls": [{"name": "some_tool", "arguments": None, "index": 0}], + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "tool_calls": [ + { + "name": None, + "arguments": '{"some_argument": "some_arguments_value"}', + "index": 0, + } + ], + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": "tool_calls", + "usage": {"prompt_tokens": 100, 
"completion_tokens": 200}, + }, + ], + ) +] diff --git a/tests/backend/data/mistral.py b/tests/backend/data/mistral.py new file mode 100644 index 0000000..0bcdec1 --- /dev/null +++ b/tests/backend/data/mistral.py @@ -0,0 +1,173 @@ +from __future__ import annotations + +from tests.backend.data import Chunk, JsonResponse, ResultData, Url + +SIMPLE_CONVERSATION_PARAMS: list[tuple[Url, JsonResponse, ResultData]] = [ + ( + "https://api.mistral.ai", + { + "id": "fake_id_1234", + "created": 1234567890, + "model": "devstral-latest", + "usage": { + "prompt_tokens": 100, + "total_tokens": 300, + "completion_tokens": 200, + }, + "object": "chat.completion", + "choices": [ + { + "index": 0, + "finish_reason": "stop", + "message": { + "role": "assistant", + "tool_calls": None, + "content": "Some content", + }, + } + ], + }, + { + "message": "Some content", + "finish_reason": "stop", + "usage": { + "prompt_tokens": 100, + "total_tokens": 300, + "completion_tokens": 200, + }, + }, + ) +] + +TOOL_CONVERSATION_PARAMS: list[tuple[Url, JsonResponse, ResultData]] = [ + ( + "https://api.mistral.ai", + { + "id": "fake_id_1234", + "created": 1234567890, + "model": "devstral-latest", + "usage": { + "prompt_tokens": 100, + "total_tokens": 300, + "completion_tokens": 200, + }, + "object": "chat.completion", + "choices": [ + { + "index": 0, + "finish_reason": "tool_calls", + "message": { + "role": "assistant", + "tool_calls": [ + { + "id": "fake_id_5678", + "function": { + "name": "some_tool", + "arguments": '{"some_argument": "some_argument_value"}', + }, + "index": 0, + } + ], + "content": "Some content", + }, + } + ], + }, + { + "message": "Some content", + "finish_reason": "tool_calls", + "tool_calls": [ + { + "name": "some_tool", + "arguments": '{"some_argument": "some_argument_value"}', + "index": 0, + } + ], + "usage": {"prompt_tokens": 100, "completion_tokens": 200}, + }, + ) +] + +STREAMED_SIMPLE_CONVERSATION_PARAMS: list[tuple[Url, list[Chunk], list[ResultData]]] = [ + ( + 
"https://api.mistral.ai", + [ + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"content":"Some content"},"finish_reason":null}],"p":"abcde"}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"content":""},"finish_reason":"stop"}],"usage":{"prompt_tokens":100,"total_tokens":300,"completion_tokens":200},"p":"abcdefghijklmnopq"}', + rb"data: [DONE]", + ], + [ + { + "message": "", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "Some content", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": "stop", + "usage": {"prompt_tokens": 100, "completion_tokens": 200}, + }, + ], + ) +] + + +STREAMED_TOOL_CONVERSATION_PARAMS: list[tuple[Url, list[Chunk], list[ResultData]]] = [ + ( + "https://api.mistral.ai", + [ + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"content":"Some content"},"finish_reason":null}],"p":"a"}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"tool_calls":[{"id":"fake_id_1234","function":{"name":"some_tool","arguments":""},"index":0}]},"finish_reason":null}],"p":"abcdef"}', + rb'data: 
{"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"tool_calls":[{"function":{"name":"","arguments":"{\"some_argument\": "},"index":0}]},"finish_reason":null}],"p":"abcdefghijklmnopq"}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"tool_calls":[{"id":"null","function":{"name":"","arguments":"\"some_argument_value\"}"},"index":0}]},"finish_reason":null}],"p":"abcdefghijklmnopqrstuvwxyz0123456"}', + rb'data: {"id":"fake_id_1234","object":"chat.completion.chunk","created":1234567890,"model":"devstral-latest","choices":[{"index":0,"delta":{"content":""},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":100,"total_tokens":300,"completion_tokens":200},"p":"abcdefghijklmnopq"}', + rb"data: [DONE]", + ], + [ + { + "message": "", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "Some content", + "finish_reason": None, + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "tool_calls": [{"name": "some_tool", "arguments": "", "index": 0}], + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "tool_calls": [ + {"name": "", "arguments": '{"some_argument": ', "index": 0} + ], + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": None, + "tool_calls": [ + {"name": "", "arguments": '"some_argument_value"}', "index": 0} + ], + "usage": {"prompt_tokens": 0, "completion_tokens": 0}, + }, + { + "message": "", + "finish_reason": "tool_calls", + "usage": {"prompt_tokens": 100, "completion_tokens": 200}, + }, + ], + ) +] diff --git a/tests/backend/test_backend.py b/tests/backend/test_backend.py new file mode 100644 index 0000000..93c4048 --- /dev/null +++ b/tests/backend/test_backend.py @@ 
-0,0 +1,248 @@ +"""Test data for this module was generated using real LLM provider API responses, +with responses simplified and formatted to make them readable and maintainable. + +To update or modify test parameters: +1. Make actual API calls to the target providers +2. Use the raw API responses as a base for updating test data +3. Simplify only where necessary for readability while preserving core structure + +The closer test data remains to real API responses, the more reliable and accurate +the tests will be. Always prefer real API data over manually constructed examples. +""" + +from __future__ import annotations + +import httpx +import pytest +import respx + +from tests.backend.data import Chunk, JsonResponse, ResultData, Url +from tests.backend.data.fireworks import ( + SIMPLE_CONVERSATION_PARAMS as FIREWORKS_SIMPLE_CONVERSATION_PARAMS, + STREAMED_SIMPLE_CONVERSATION_PARAMS as FIREWORKS_STREAMED_SIMPLE_CONVERSATION_PARAMS, + STREAMED_TOOL_CONVERSATION_PARAMS as FIREWORKS_STREAMED_TOOL_CONVERSATION_PARAMS, + TOOL_CONVERSATION_PARAMS as FIREWORKS_TOOL_CONVERSATION_PARAMS, +) +from tests.backend.data.mistral import ( + SIMPLE_CONVERSATION_PARAMS as MISTRAL_SIMPLE_CONVERSATION_PARAMS, + STREAMED_SIMPLE_CONVERSATION_PARAMS as MISTRAL_STREAMED_SIMPLE_CONVERSATION_PARAMS, + STREAMED_TOOL_CONVERSATION_PARAMS as MISTRAL_STREAMED_TOOL_CONVERSATION_PARAMS, + TOOL_CONVERSATION_PARAMS as MISTRAL_TOOL_CONVERSATION_PARAMS, +) +from vibe.core.config import ModelConfig, ProviderConfig +from vibe.core.llm.backend.generic import GenericBackend +from vibe.core.llm.backend.mistral import MistralBackend +from vibe.core.llm.exceptions import BackendError +from vibe.core.llm.types import BackendLike +from vibe.core.types import LLMChunk, LLMMessage, Role, ToolCall + + +class TestBackend: + @pytest.mark.asyncio + @pytest.mark.parametrize( + "base_url,json_response,result_data", + [ + *FIREWORKS_SIMPLE_CONVERSATION_PARAMS, + *FIREWORKS_TOOL_CONVERSATION_PARAMS, + 
*MISTRAL_SIMPLE_CONVERSATION_PARAMS, + *MISTRAL_TOOL_CONVERSATION_PARAMS, + ], + ) + async def test_backend_complete( + self, base_url: Url, json_response: JsonResponse, result_data: ResultData + ): + with respx.mock(base_url=base_url) as mock_api: + mock_api.post("/v1/chat/completions").mock( + return_value=httpx.Response(status_code=200, json=json_response) + ) + provider = ProviderConfig( + name="provider_name", + api_base=f"{base_url}/v1", + api_key_env_var="API_KEY", + ) + + BackendClasses = [ + GenericBackend, + *([MistralBackend] if base_url == "https://api.mistral.ai" else []), + ] + for BackendClass in BackendClasses: + backend: BackendLike = BackendClass(provider=provider) + model = ModelConfig( + name="model_name", provider="provider_name", alias="model_alias" + ) + messages = [LLMMessage(role=Role.user, content="Just say hi")] + + result = await backend.complete( + model=model, + messages=messages, + temperature=0.2, + tools=None, + max_tokens=None, + tool_choice=None, + extra_headers=None, + ) + + assert result.message.content == result_data["message"] + assert result.finish_reason == result_data["finish_reason"] + assert result.usage is not None + assert ( + result.usage.prompt_tokens == result_data["usage"]["prompt_tokens"] + ) + assert ( + result.usage.completion_tokens + == result_data["usage"]["completion_tokens"] + ) + + if result.message.tool_calls is None: + return + + assert len(result.message.tool_calls) == len(result_data["tool_calls"]) + for i, tool_call in enumerate[ToolCall](result.message.tool_calls): + assert ( + tool_call.function.name == result_data["tool_calls"][i]["name"] + ) + assert ( + tool_call.function.arguments + == result_data["tool_calls"][i]["arguments"] + ) + assert tool_call.index == result_data["tool_calls"][i]["index"] + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "base_url,chunks,result_data", + [ + *FIREWORKS_STREAMED_SIMPLE_CONVERSATION_PARAMS, + *FIREWORKS_STREAMED_TOOL_CONVERSATION_PARAMS, + 
*MISTRAL_STREAMED_SIMPLE_CONVERSATION_PARAMS, + *MISTRAL_STREAMED_TOOL_CONVERSATION_PARAMS, + ], + ) + async def test_backend_complete_streaming( + self, base_url: Url, chunks: list[Chunk], result_data: list[ResultData] + ): + with respx.mock(base_url=base_url) as mock_api: + mock_api.post("/v1/chat/completions").mock( + return_value=httpx.Response( + status_code=200, + stream=httpx.ByteStream(stream=b"\n\n".join(chunks)), + headers={"Content-Type": "text/event-stream"}, + ) + ) + provider = ProviderConfig( + name="provider_name", + api_base=f"{base_url}/v1", + api_key_env_var="API_KEY", + ) + BackendClasses = [ + GenericBackend, + *([MistralBackend] if base_url == "https://api.mistral.ai" else []), + ] + for BackendClass in BackendClasses: + backend: BackendLike = BackendClass(provider=provider) + model = ModelConfig( + name="model_name", provider="provider_name", alias="model_alias" + ) + + messages = [ + LLMMessage(role=Role.user, content="List files in current dir") + ] + + results: list[LLMChunk] = [] + async for result in backend.complete_streaming( + model=model, + messages=messages, + temperature=0.2, + tools=None, + max_tokens=None, + tool_choice=None, + extra_headers=None, + ): + results.append(result) + + for result, expected_result in zip(results, result_data, strict=True): + assert result.message.content == expected_result["message"] + assert result.finish_reason == expected_result["finish_reason"] + assert result.usage is not None + assert ( + result.usage.prompt_tokens + == expected_result["usage"]["prompt_tokens"] + ) + assert ( + result.usage.completion_tokens + == expected_result["usage"]["completion_tokens"] + ) + + if result.message.tool_calls is None: + continue + + for i, tool_call in enumerate(result.message.tool_calls): + assert ( + tool_call.function.name + == expected_result["tool_calls"][i]["name"] + ) + assert ( + tool_call.function.arguments + == expected_result["tool_calls"][i]["arguments"] + ) + assert ( + tool_call.index == 
expected_result["tool_calls"][i]["index"] + ) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "base_url,backend_class,response", + [ + ( + "https://api.fireworks.ai", + GenericBackend, + httpx.Response(status_code=500, text="Internal Server Error"), + ), + ( + "https://api.fireworks.ai", + GenericBackend, + httpx.Response(status_code=429, text="Rate Limit Exceeded"), + ), + ( + "https://api.mistral.ai", + MistralBackend, + httpx.Response(status_code=500, text="Internal Server Error"), + ), + ( + "https://api.mistral.ai", + MistralBackend, + httpx.Response(status_code=429, text="Rate Limit Exceeded"), + ), + ], + ) + async def test_backend_complete_streaming_error( + self, + base_url: Url, + backend_class: type[MistralBackend | GenericBackend], + response: httpx.Response, + ): + with respx.mock(base_url=base_url) as mock_api: + mock_api.post("/v1/chat/completions").mock(return_value=response) + provider = ProviderConfig( + name="provider_name", + api_base=f"{base_url}/v1", + api_key_env_var="API_KEY", + ) + backend = backend_class(provider=provider) + model = ModelConfig( + name="model_name", provider="provider_name", alias="model_alias" + ) + messages = [LLMMessage(role=Role.user, content="Just say hi")] + with pytest.raises(BackendError) as e: + async for _ in backend.complete_streaming( + model=model, + messages=messages, + temperature=0.2, + tools=None, + max_tokens=None, + tool_choice=None, + extra_headers=None, + ): + pass + assert e.value.status == response.status_code + assert e.value.reason == response.reason_phrase + assert e.value.parsed_error is None diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..3fa0537 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import sys +from typing import Any + +from pydantic.fields import FieldInfo +from pydantic_settings import BaseSettings, PydanticBaseSettingsSource +import pytest + +_in_mem_config: dict[str, Any] = {} + + +class 
InMemSettingsSource(PydanticBaseSettingsSource): + def __init__(self, settings_cls: type[BaseSettings]) -> None: + super().__init__(settings_cls) + + def get_field_value( + self, field: FieldInfo, field_name: str + ) -> tuple[Any, str, bool]: + return _in_mem_config.get(field_name), field_name, False + + def __call__(self) -> dict[str, Any]: + return _in_mem_config + + +@pytest.fixture(autouse=True, scope="session") +def _patch_vibe_config() -> None: + """Patch VibeConfig.settings_customise_sources to only use init_settings in tests. + + This ensures that even production code that creates VibeConfig instances + will only use init_settings and ignore environment variables and config files. + Runs once per test session before any tests execute. + """ + from vibe.core.config import VibeConfig + + def patched_settings_customise_sources( + cls, + settings_cls: type[BaseSettings], + init_settings: PydanticBaseSettingsSource, + env_settings: PydanticBaseSettingsSource, + dotenv_settings: PydanticBaseSettingsSource, + file_secret_settings: PydanticBaseSettingsSource, + ) -> tuple[PydanticBaseSettingsSource, ...]: + return (init_settings, InMemSettingsSource(settings_cls)) + + VibeConfig.settings_customise_sources = classmethod( + patched_settings_customise_sources + ) # type: ignore[assignment] + + def dump_config(cls, config: dict[str, Any]) -> None: + global _in_mem_config + _in_mem_config = config + + VibeConfig.dump_config = classmethod(dump_config) # type: ignore[assignment] + + def patched_load(cls, agent: str | None = None, **overrides: Any) -> Any: + return cls(**overrides) + + VibeConfig.load = classmethod(patched_load) # type: ignore[assignment] + + +@pytest.fixture(autouse=True) +def _reset_in_mem_config() -> None: + """Reset in-memory config before each test to prevent test isolation issues. 
+ + This ensures that each test starts with a clean configuration state, + preventing race conditions and test interference when tests run in parallel + or when VibeConfig.save_updates() modifies the shared _in_mem_config dict. + """ + global _in_mem_config + _in_mem_config = {} + + +@pytest.fixture(autouse=True) +def _mock_api_key(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("MISTRAL_API_KEY", "mock") + + +@pytest.fixture(autouse=True) +def _mock_platform(monkeypatch: pytest.MonkeyPatch) -> None: + """Mock platform to be Linux with /bin/sh shell for consistent test behavior. + + This ensures that platform-specific system prompt generation is consistent + across all tests regardless of the actual platform running the tests. + """ + monkeypatch.setattr(sys, "platform", "linux") + monkeypatch.setenv("SHELL", "/bin/sh") diff --git a/tests/core/test_config_migration.py b/tests/core/test_config_migration.py new file mode 100644 index 0000000..d2241da --- /dev/null +++ b/tests/core/test_config_migration.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from contextlib import contextmanager +from pathlib import Path +import tomllib + +import tomli_w + +from vibe.core import config +from vibe.core.config import VibeConfig + + +def _restore_dump_config(config_file: Path): + original_dump_config = VibeConfig.dump_config + + def real_dump_config(cls, config_dict: dict) -> None: + try: + with config_file.open("wb") as f: + tomli_w.dump(config_dict, f) + except OSError: + config_file.write_text( + "\n".join( + f"{k} = {v!r}" for k, v in config_dict.items() if v is not None + ), + encoding="utf-8", + ) + + VibeConfig.dump_config = classmethod(real_dump_config) # type: ignore[assignment] + return original_dump_config + + +@contextmanager +def _migrate_config_file(tmp_path: Path, content: str): + config_file = tmp_path / "config.toml" + config_file.write_text(content, encoding="utf-8") + + original_config_file = config.CONFIG_FILE + original_dump_config 
= _restore_dump_config(config_file) + + try: + config.CONFIG_FILE = config_file + VibeConfig._migrate() + yield config_file + finally: + config.CONFIG_FILE = original_config_file + VibeConfig.dump_config = original_dump_config + + +def _load_migrated_config(config_file: Path) -> dict: + with config_file.open("rb") as f: + return tomllib.load(f) diff --git a/tests/mock/__init__.py b/tests/mock/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/mock/mock_backend_factory.py b/tests/mock/mock_backend_factory.py new file mode 100644 index 0000000..c15ad65 --- /dev/null +++ b/tests/mock/mock_backend_factory.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from contextlib import contextmanager + +from vibe.core.config import Backend +from vibe.core.llm.backend.factory import BACKEND_FACTORY + + +@contextmanager +def mock_backend_factory(backend_type: Backend, factory_func): + original = BACKEND_FACTORY[backend_type] + try: + BACKEND_FACTORY[backend_type] = factory_func + yield + finally: + BACKEND_FACTORY[backend_type] = original diff --git a/tests/mock/mock_entrypoint.py b/tests/mock/mock_entrypoint.py new file mode 100644 index 0000000..5afca95 --- /dev/null +++ b/tests/mock/mock_entrypoint.py @@ -0,0 +1,66 @@ +"""Wrapper script that intercepts LLM calls when mocking is enabled. + +This script is used to mock the LLM calls when testing the CLI. +Mocked returns are stored in the VIBE_MOCK_LLM_DATA environment variable. 
+""" + +from __future__ import annotations + +from collections.abc import AsyncGenerator +import json +import os +import sys +from unittest.mock import patch + +from pydantic import ValidationError + +from tests import TESTS_ROOT +from tests.mock.utils import MOCK_DATA_ENV_VAR +from vibe.core.types import LLMChunk + + +def mock_llm_output() -> None: + sys.path.insert(0, str(TESTS_ROOT)) + + # Apply mocking before importing any vibe modules + mock_data_str = os.environ.get(MOCK_DATA_ENV_VAR) + if not mock_data_str: + raise ValueError(f"{MOCK_DATA_ENV_VAR} is not set") + mock_data = json.loads(mock_data_str) + try: + chunks = [LLMChunk.model_validate(chunk) for chunk in mock_data] + except ValidationError as e: + raise ValueError(f"Invalid mock data: {e}") from e + + chunk_iterable = iter(chunks) + + async def mock_complete(*args, **kwargs) -> LLMChunk: + return next(chunk_iterable) + + async def mock_complete_streaming(*args, **kwargs) -> AsyncGenerator[LLMChunk]: + yield next(chunk_iterable) + + patch( + "vibe.core.llm.backend.mistral.MistralBackend.complete", + side_effect=mock_complete, + ).start() + patch( + "vibe.core.llm.backend.generic.GenericBackend.complete", + side_effect=mock_complete, + ).start() + patch( + "vibe.core.llm.backend.mistral.MistralBackend.complete_streaming", + side_effect=mock_complete_streaming, + ).start() + patch( + "vibe.core.llm.backend.generic.GenericBackend.complete_streaming", + side_effect=mock_complete_streaming, + ).start() + + +if __name__ == "__main__": + mock_llm_output() + + from vibe.acp.entrypoint import main + + main() diff --git a/tests/mock/utils.py b/tests/mock/utils.py new file mode 100644 index 0000000..cfb4152 --- /dev/null +++ b/tests/mock/utils.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import json + +from vibe.core.types import LLMChunk, LLMMessage, LLMUsage, Role, ToolCall + +MOCK_DATA_ENV_VAR = "VIBE_MOCK_LLM_DATA" + + +def mock_llm_chunk( + content: str = "Hello!", + role: Role = 
Role.assistant, + tool_calls: list[ToolCall] | None = None, + name: str | None = None, + tool_call_id: str | None = None, + finish_reason: str | None = None, + prompt_tokens: int = 10, + completion_tokens: int = 5, +) -> LLMChunk: + message = LLMMessage( + role=role, + content=content, + tool_calls=tool_calls, + name=name, + tool_call_id=tool_call_id, + ) + return LLMChunk( + message=message, + usage=LLMUsage( + prompt_tokens=prompt_tokens, completion_tokens=completion_tokens + ), + finish_reason=finish_reason, + ) + + +def get_mocking_env(mock_chunks: list[LLMChunk] | None = None) -> dict[str, str]: + if mock_chunks is None: + mock_chunks = [mock_llm_chunk()] + + mock_data = [LLMChunk.model_dump(mock_chunk) for mock_chunk in mock_chunks] + + return {MOCK_DATA_ENV_VAR: json.dumps(mock_data)} diff --git a/tests/onboarding/test_run_onboarding.py b/tests/onboarding/test_run_onboarding.py new file mode 100644 index 0000000..4f2a4db --- /dev/null +++ b/tests/onboarding/test_run_onboarding.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +from pathlib import Path +import sys +from typing import override + +import pytest +from textual.app import App + +from vibe.setup import onboarding + + +class StubApp(App[str | None]): + def __init__(self, return_value: str | None) -> None: + super().__init__() + self._return_value = return_value + + @override + def run(self, *args: object, **kwargs: object) -> str | None: + return self._return_value + + +def _patch_env_file(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> Path: + env_file = tmp_path / ".env" + monkeypatch.setattr(onboarding, "GLOBAL_ENV_FILE", env_file, raising=False) + return env_file + + +def _exit_raiser(code: int = 0) -> None: + raise SystemExit(code) + + +def test_exits_on_cancel( + monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str], tmp_path: Path +) -> None: + _patch_env_file(monkeypatch, tmp_path) + monkeypatch.setattr(sys, "exit", _exit_raiser) + + with pytest.raises(SystemExit) 
as excinfo: + onboarding.run_onboarding(StubApp(None)) + + assert excinfo.value.code == 0 + out = capsys.readouterr().out + assert "Setup cancelled. See you next time!" in out + + +def test_warns_on_save_error( + monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str], tmp_path: Path +) -> None: + _patch_env_file(monkeypatch, tmp_path) + monkeypatch.setattr(sys, "exit", _exit_raiser) + + onboarding.run_onboarding(StubApp("save_error:disk full")) + + out = capsys.readouterr().out + assert "Could not save API key" in out + assert "disk full" in out + + +def test_successfully_completes( + monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str], tmp_path: Path +) -> None: + _patch_env_file(monkeypatch, tmp_path) + monkeypatch.setattr(sys, "exit", _exit_raiser) + + onboarding.run_onboarding(StubApp("completed")) + + out = capsys.readouterr().out + assert out == "" diff --git a/tests/onboarding/test_ui_onboarding.py b/tests/onboarding/test_ui_onboarding.py new file mode 100644 index 0000000..535a98a --- /dev/null +++ b/tests/onboarding/test_ui_onboarding.py @@ -0,0 +1,124 @@ +from __future__ import annotations + +from collections.abc import Callable +from pathlib import Path +from typing import Any + +import pytest +from textual.events import Resize +from textual.geometry import Size +from textual.pilot import Pilot +from textual.widgets import Input + +from vibe.core import config as core_config +from vibe.setup.onboarding import OnboardingApp +import vibe.setup.onboarding.screens.api_key as api_key_module +from vibe.setup.onboarding.screens.api_key import ApiKeyScreen +from vibe.setup.onboarding.screens.theme_selection import THEMES, ThemeSelectionScreen + + +async def _wait_for( + condition: Callable[[], bool], + pilot: Pilot, + timeout: float = 5.0, + interval: float = 0.05, +) -> None: + elapsed = 0.0 + while not condition(): + await pilot.pause(interval) + if (elapsed := elapsed + interval) >= timeout: + msg = "Timed out waiting for 
condition." + raise AssertionError(msg) + + +@pytest.fixture() +def onboarding_app( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> tuple[OnboardingApp, Path, dict[str, Any]]: + vibe_home = tmp_path / ".vibe" + env_file = vibe_home / ".env" + saved_updates: dict[str, Any] = {} + + def record_updates(updates: dict[str, Any]) -> None: + saved_updates.update(updates) + + monkeypatch.setenv("VIBE_HOME", str(vibe_home)) + + for module in (core_config, api_key_module): + monkeypatch.setattr(module, "GLOBAL_CONFIG_DIR", vibe_home, raising=False) + monkeypatch.setattr(module, "GLOBAL_ENV_FILE", env_file, raising=False) + + monkeypatch.setattr( + core_config.VibeConfig, + "save_updates", + classmethod(lambda cls, updates: record_updates(updates)), + ) + + return OnboardingApp(), env_file, saved_updates + + +async def pass_welcome_screen(pilot: Pilot) -> None: + welcome_screen = pilot.app.get_screen("welcome") + await _wait_for( + lambda: not welcome_screen.query_one("#enter-hint").has_class("hidden"), pilot + ) + await pilot.press("enter") + await _wait_for(lambda: isinstance(pilot.app.screen, ThemeSelectionScreen), pilot) + + +@pytest.mark.asyncio +async def test_ui_gets_through_the_onboarding_successfully( + onboarding_app: tuple[OnboardingApp, Path, dict[str, Any]], +) -> None: + app, env_file, config_updates = onboarding_app + api_key_value = "sk-onboarding-test-key" + + async with app.run_test() as pilot: + await pass_welcome_screen(pilot) + + await pilot.press("enter") + await _wait_for(lambda: isinstance(app.screen, ApiKeyScreen), pilot) + api_screen = app.screen + input_widget = api_screen.query_one("#key", Input) + await pilot.press(*api_key_value) + assert input_widget.value == api_key_value + + await pilot.press("enter") + await _wait_for(lambda: app.return_value is not None, pilot, timeout=2.0) + + assert app.return_value == "completed" + + assert env_file.is_file() + env_contents = env_file.read_text(encoding="utf-8") + assert "MISTRAL_API_KEY" in 
env_contents + assert api_key_value in env_contents + + assert config_updates.get("textual_theme") == app.theme + + +@pytest.mark.asyncio +async def test_ui_can_pick_a_theme_and_saves_selection( + onboarding_app: tuple[OnboardingApp, Path, dict[str, Any]], +) -> None: + app, _, config_updates = onboarding_app + + async with app.run_test() as pilot: + await pass_welcome_screen(pilot) + + theme_screen = app.screen + app.post_message( + Resize(Size(40, 10), Size(40, 10)) + ) # trigger the resize event handler + preview = theme_screen.query_one("#preview") + assert preview.styles.max_height is not None + target_theme = "gruvbox" + assert target_theme in THEMES + start_index = THEMES.index(app.theme) + target_index = THEMES.index(target_theme) + steps_down = (target_index - start_index) % len(THEMES) + await pilot.press(*["down"] * steps_down) + assert app.theme == target_theme + await pilot.press("enter") + await _wait_for(lambda: isinstance(app.screen, ApiKeyScreen), pilot) + + assert config_updates.get("textual_theme") == target_theme diff --git a/tests/playground/.gitkeep b/tests/playground/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/tests/snapshots/__snapshots__/test_ui_snapshot_basic_conversation/test_snapshot_shows_basic_conversation.svg b/tests/snapshots/__snapshots__/test_ui_snapshot_basic_conversation/test_snapshot_shows_basic_conversation.svg new file mode 100644 index 0000000..cab55e1 --- /dev/null +++ b/tests/snapshots/__snapshots__/test_ui_snapshot_basic_conversation/test_snapshot_shows_basic_conversation.svg @@ -0,0 +1,205 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SnapshotTestAppWithConversation + + + + + + + + + + +╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ + + + + + 
+╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +Hello there, who are you? + +● I'm the Vibe agent and I'm ready to help. + + + + + + + + + + + + + + + + +⏵ auto-approve off (shift+tab to toggle) + +╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +>Ask anything... +╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +/test/workdir12% of 100k tokens + + + + diff --git a/tests/snapshots/__snapshots__/test_ui_snapshot_code_block_horizontal_scrolling/test_snapshot_allows_horizontal_scrolling_for_long_code_blocks.svg b/tests/snapshots/__snapshots__/test_ui_snapshot_code_block_horizontal_scrolling/test_snapshot_allows_horizontal_scrolling_for_long_code_blocks.svg new file mode 100644 index 0000000..b725549 --- /dev/null +++ b/tests/snapshots/__snapshots__/test_ui_snapshot_code_block_horizontal_scrolling/test_snapshot_allows_horizontal_scrolling_for_long_code_blocks.svg @@ -0,0 +1,213 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BaseSnapshotTestApp + + + + + + + + + + +╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ + + + + + +╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + + +● Here's a very long print instruction: + + +um(): + very long line (Lorem Ipsum) +rem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore  + + + +The print statement includes a very long line of Lorem Ipsum text to demonstrate a lengthy output. 
+ + + + + + + + +⏵ auto-approve off (shift+tab to toggle) + +╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +>Ask anything... +╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +/test/workdir0% of 100k tokens + + + + diff --git a/tests/snapshots/__snapshots__/test_ui_snapshot_release_update_notification/test_snapshot_shows_release_update_notification.svg b/tests/snapshots/__snapshots__/test_ui_snapshot_release_update_notification/test_snapshot_shows_release_update_notification.svg new file mode 100644 index 0000000..de089f4 --- /dev/null +++ b/tests/snapshots/__snapshots__/test_ui_snapshot_release_update_notification/test_snapshot_shows_release_update_notification.svg @@ -0,0 +1,205 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SnapshotTestAppWithUpdate + + + + + + + + + + +╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ + + + + + +╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + + + + + + + + + + + + + + + + + + +Update available +0.1.0 => 0.2.0 +Run "uv tool upgrade mistral-vibe" to update + +╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +>Ask anything... 
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +/test/workdir0% of 100k tokens + + + + diff --git a/tests/snapshots/base_snapshot_test_app.py b/tests/snapshots/base_snapshot_test_app.py new file mode 100644 index 0000000..7d002fe --- /dev/null +++ b/tests/snapshots/base_snapshot_test_app.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from rich.style import Style +from textual.widgets.text_area import TextAreaTheme + +from tests.stubs.fake_backend import FakeBackend +from vibe.cli.textual_ui.app import VibeApp +from vibe.cli.textual_ui.widgets.chat_input import ChatTextArea +from vibe.core.agent import Agent +from vibe.core.config import SessionLoggingConfig, VibeConfig + + +def default_config() -> VibeConfig: + """Default configuration for snapshot testing. + Remove as much interference as possible from the snapshot comparison, in order to get a clean pixel-to-pixel comparison. + - Injects a fake backend to prevent (or stub) LLM calls. + - Disables the welcome banner animation. + - Forces a value for the displayed workdir + - Hides the chat input cursor (as the blinking animation is not deterministic). 
+ """ + return VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), + textual_theme="gruvbox", + disable_welcome_banner_animation=True, + displayed_workdir="/test/workdir", + ) + + +class BaseSnapshotTestApp(VibeApp): + CSS_PATH = "../../vibe/cli/textual_ui/app.tcss" + + def __init__(self, config: VibeConfig | None = None, **kwargs): + config = config or default_config() + + super().__init__(config=config, **kwargs) + + self.agent = Agent( + config, + auto_approve=self.auto_approve, + enable_streaming=self.enable_streaming, + backend=FakeBackend(), + ) + + async def on_mount(self) -> None: + await super().on_mount() + self._hide_chat_input_cursor() + + def _hide_chat_input_cursor(self) -> None: + text_area = self.query_one(ChatTextArea) + hidden_cursor_theme = TextAreaTheme(name="hidden_cursor", cursor_style=Style()) + text_area.register_theme(hidden_cursor_theme) + text_area.theme = "hidden_cursor" diff --git a/tests/snapshots/snap_compare.py b/tests/snapshots/snap_compare.py new file mode 100644 index 0000000..57c7933 --- /dev/null +++ b/tests/snapshots/snap_compare.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from collections.abc import Awaitable, Callable, Iterable +from pathlib import PurePath +from typing import Protocol + +from textual.app import App +from textual.pilot import Pilot + + +class SnapCompare(Protocol): + def __call__( + self, + app: str | PurePath | App, + /, + *, + press: Iterable[str] = ..., + terminal_size: tuple[int, int] = ..., + run_before: (Callable[[Pilot], Awaitable[None] | None] | None) = ..., + ) -> bool: ... 
diff --git a/tests/snapshots/test_ui_snapshot_basic_conversation.py b/tests/snapshots/test_ui_snapshot_basic_conversation.py new file mode 100644 index 0000000..e78c3ad --- /dev/null +++ b/tests/snapshots/test_ui_snapshot_basic_conversation.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from textual.pilot import Pilot + +from tests.mock.utils import mock_llm_chunk +from tests.snapshots.base_snapshot_test_app import BaseSnapshotTestApp, default_config +from tests.snapshots.snap_compare import SnapCompare +from tests.stubs.fake_backend import FakeBackend +from vibe.core.agent import Agent + + +class SnapshotTestAppWithConversation(BaseSnapshotTestApp): + def __init__(self) -> None: + config = default_config() + fake_backend = FakeBackend( + results=[ + mock_llm_chunk( + content="I'm the Vibe agent and I'm ready to help.", + prompt_tokens=10_000, + completion_tokens=2_500, + ) + ] + ) + super().__init__(config=config) + self.agent = Agent( + config, + auto_approve=self.auto_approve, + enable_streaming=self.enable_streaming, + backend=fake_backend, + ) + + +def test_snapshot_shows_basic_conversation(snap_compare: SnapCompare) -> None: + async def run_before(pilot: Pilot) -> None: + await pilot.press(*"Hello there, who are you?") + await pilot.press("enter") + await pilot.pause(0.4) + + assert snap_compare( + "test_ui_snapshot_basic_conversation.py:SnapshotTestAppWithConversation", + terminal_size=(120, 36), + run_before=run_before, + ) diff --git a/tests/snapshots/test_ui_snapshot_code_block_horizontal_scrolling.py b/tests/snapshots/test_ui_snapshot_code_block_horizontal_scrolling.py new file mode 100644 index 0000000..97d1058 --- /dev/null +++ b/tests/snapshots/test_ui_snapshot_code_block_horizontal_scrolling.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from textual.pilot import Pilot +from textual.widgets.markdown import MarkdownFence + +from tests.snapshots.snap_compare import SnapCompare +from vibe.cli.textual_ui.widgets.messages import 
AssistantMessage + + +def test_snapshot_allows_horizontal_scrolling_for_long_code_blocks( + snap_compare: SnapCompare, +) -> None: + assistant_message_md = """Here's a very long print instruction: + +```python +def lorem_ipsum(): + # Print a very long line (Lorem Ipsum) + print("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. 
Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem.") +``` + +The `print` statement includes a very long line of Lorem Ipsum text to demonstrate a lengthy output.""" + + async def run_before(pilot: Pilot) -> None: + app = pilot.app + assistant_message = AssistantMessage(assistant_message_md) + messages_area = app.query_one("#messages") + await messages_area.mount(assistant_message) + await assistant_message.write_initial_content() + await pilot.pause(0.1) + + markdown_fence = app.query_one(MarkdownFence) + markdown_fence.scroll_relative(x=15, immediate=True) + + assert snap_compare( + "base_snapshot_test_app.py:BaseSnapshotTestApp", + run_before=run_before, + terminal_size=(120, 36), + ) diff --git a/tests/snapshots/test_ui_snapshot_release_update_notification.py b/tests/snapshots/test_ui_snapshot_release_update_notification.py new file mode 100644 index 0000000..38c30fc --- /dev/null +++ b/tests/snapshots/test_ui_snapshot_release_update_notification.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from textual.pilot import Pilot + +from tests.snapshots.base_snapshot_test_app import BaseSnapshotTestApp, default_config +from tests.snapshots.snap_compare import SnapCompare +from vibe.cli.update_notifier import FakeVersionUpdateGateway, VersionUpdate + + +class SnapshotTestAppWithUpdate(BaseSnapshotTestApp): + def __init__(self): + config = default_config() + config.enable_update_checks = True + version_update_notifier = FakeVersionUpdateGateway( + update=VersionUpdate(latest_version="0.2.0") + ) + super().__init__(config=config, version_update_notifier=version_update_notifier) + + +def test_snapshot_shows_release_update_notification(snap_compare: SnapCompare) -> None: + async def run_before(pilot: Pilot) -> None: + await pilot.pause(0.2) + + assert snap_compare( + 
"test_ui_snapshot_release_update_notification.py:SnapshotTestAppWithUpdate", + terminal_size=(120, 36), + run_before=run_before, + ) diff --git a/tests/stubs/fake_backend.py b/tests/stubs/fake_backend.py new file mode 100644 index 0000000..f788d20 --- /dev/null +++ b/tests/stubs/fake_backend.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +from collections.abc import AsyncGenerator, Callable, Iterable + +from tests.mock.utils import mock_llm_chunk +from vibe.core.types import LLMChunk, LLMMessage + + +class FakeBackend: + """Minimal async backend stub to drive Agent.act without network. + + Provide a finite sequence of LLMResult objects to be returned by + `complete`. When exhausted, returns an empty assistant message. + """ + + def __init__( + self, + results: Iterable[LLMChunk] | None = None, + *, + token_counter: Callable[[list[LLMMessage]], int] | None = None, + exception_to_raise: Exception | None = None, + ) -> None: + self._chunks = list(results or []) + self._requests_messages: list[list[LLMMessage]] = [] + self._requests_extra_headers: list[dict[str, str] | None] = [] + self._count_tokens_calls: list[list[LLMMessage]] = [] + self._token_counter = token_counter or self._default_token_counter + self._exception_to_raise = exception_to_raise + + @property + def requests_messages(self) -> list[list[LLMMessage]]: + return self._requests_messages + + @property + def requests_extra_headers(self) -> list[dict[str, str] | None]: + return self._requests_extra_headers + + @staticmethod + def _default_token_counter(messages: list[LLMMessage]) -> int: + return 1 + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + return None + + async def complete( + self, + *, + model, + messages, + temperature, + tools, + tool_choice, + extra_headers, + max_tokens, + ) -> LLMChunk: + if self._exception_to_raise: + raise self._exception_to_raise + + self._requests_messages.append(messages) + 
self._requests_extra_headers.append(extra_headers) + if self._chunks: + chunk = self._chunks.pop(0) + if not self._chunks: + chunk = chunk.model_copy(update={"finish_reason": "stop"}) + return chunk + return mock_llm_chunk(content="", finish_reason="stop") + + async def complete_streaming( + self, + *, + model, + messages, + temperature, + tools, + tool_choice, + extra_headers, + max_tokens, + ) -> AsyncGenerator[LLMChunk]: + if self._exception_to_raise: + raise self._exception_to_raise + + self._requests_messages.append(messages) + self._requests_extra_headers.append(extra_headers) + has_final_chunk = False + while self._chunks: + chunk = self._chunks.pop(0) + is_last_provided_chunk = not self._chunks + if is_last_provided_chunk: + chunk = chunk.model_copy(update={"finish_reason": "stop"}) + + if chunk.finish_reason is not None: + has_final_chunk = True + + yield chunk + if has_final_chunk: + break + + if not has_final_chunk: + yield mock_llm_chunk(content="", finish_reason="stop") + + async def count_tokens( + self, + *, + model, + messages, + temperature=0.0, + tools, + tool_choice=None, + extra_headers, + ) -> int: + self._count_tokens_calls.append(list(messages)) + return self._token_counter(messages) diff --git a/tests/stubs/fake_connection.py b/tests/stubs/fake_connection.py new file mode 100644 index 0000000..c4ad663 --- /dev/null +++ b/tests/stubs/fake_connection.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +from collections.abc import Callable +from typing import Any + +from acp import ( + Agent, + AgentSideConnection, + CreateTerminalRequest, + KillTerminalCommandRequest, + KillTerminalCommandResponse, + ReadTextFileRequest, + ReadTextFileResponse, + ReleaseTerminalRequest, + ReleaseTerminalResponse, + RequestPermissionRequest, + RequestPermissionResponse, + SessionNotification, + TerminalHandle, + TerminalOutputRequest, + TerminalOutputResponse, + WaitForTerminalExitRequest, + WaitForTerminalExitResponse, + WriteTextFileRequest, + 
WriteTextFileResponse, +) + + +class FakeAgentSideConnection(AgentSideConnection): + def __init__(self, to_agent: Callable[[AgentSideConnection], Agent]) -> None: + self._session_updates = [] + to_agent(self) + + async def sessionUpdate(self, params: SessionNotification) -> None: + self._session_updates.append(params) + + async def requestPermission( + self, params: RequestPermissionRequest + ) -> RequestPermissionResponse: + raise NotImplementedError() + + async def readTextFile(self, params: ReadTextFileRequest) -> ReadTextFileResponse: + raise NotImplementedError() + + async def writeTextFile( + self, params: WriteTextFileRequest + ) -> WriteTextFileResponse | None: + raise NotImplementedError() + + async def createTerminal(self, params: CreateTerminalRequest) -> TerminalHandle: + raise NotImplementedError() + + async def terminalOutput( + self, params: TerminalOutputRequest + ) -> TerminalOutputResponse: + raise NotImplementedError() + + async def releaseTerminal( + self, params: ReleaseTerminalRequest + ) -> ReleaseTerminalResponse | None: + raise NotImplementedError() + + async def waitForTerminalExit( + self, params: WaitForTerminalExitRequest + ) -> WaitForTerminalExitResponse: + raise NotImplementedError() + + async def killTerminal( + self, params: KillTerminalCommandRequest + ) -> KillTerminalCommandResponse | None: + raise NotImplementedError() + + async def extMethod(self, method: str, params: dict[str, Any]) -> dict[str, Any]: + raise NotImplementedError() + + async def extNotification(self, method: str, params: dict[str, Any]) -> None: + raise NotImplementedError() + + async def close(self) -> None: + raise NotImplementedError() + + async def __aenter__(self) -> AgentSideConnection: + return self + + async def __aexit__(self, exc_type, exc, tb) -> None: + await self.close() diff --git a/tests/stubs/fake_tool.py b/tests/stubs/fake_tool.py new file mode 100644 index 0000000..4c40b56 --- /dev/null +++ b/tests/stubs/fake_tool.py @@ -0,0 +1,30 @@ +from 
from __future__ import annotations

from pydantic import BaseModel

from vibe.core.tools.base import BaseTool, BaseToolConfig, BaseToolState


class FakeToolArgs(BaseModel):
    """The fake tool takes no arguments."""


class FakeToolResult(BaseModel):
    # Default payload returned by every successful run.
    message: str = "fake tool executed"


class FakeToolState(BaseToolState):
    """No extra state beyond the base tool state."""


class FakeTool(BaseTool[FakeToolArgs, FakeToolResult, BaseToolConfig, FakeToolState]):
    """Deterministic tool double.

    Returns a canned result, or raises a pre-configured exception when
    ``_exception_to_raise`` is set, letting tests drive both success and
    failure paths through the agent's tool machinery.
    """

    _exception_to_raise: BaseException | None = None

    @classmethod
    def get_name(cls) -> str:
        return "stub_tool"

    async def run(self, args: FakeToolArgs) -> FakeToolResult:
        if self._exception_to_raise:
            raise self._exception_to_raise
        return FakeToolResult()
from __future__ import annotations

import pytest

from tests.mock.utils import mock_llm_chunk
from tests.stubs.fake_backend import FakeBackend
from vibe.core.agent import Agent
from vibe.core.config import SessionLoggingConfig, VibeConfig


@pytest.fixture
def vibe_config() -> VibeConfig:
    """Config with session logging disabled so tests leave no files behind."""
    return VibeConfig(session_logging=SessionLoggingConfig(enabled=False))


async def _consume(agent: Agent, prompt: str) -> None:
    """Run one agent turn and discard every emitted event."""
    async for _ in agent.act(prompt):
        pass


@pytest.mark.asyncio
async def test_passes_x_affinity_header_when_asking_an_answer(vibe_config: VibeConfig):
    """Non-streaming requests carry the session id as the x-affinity header."""
    backend = FakeBackend([mock_llm_chunk(content="Response", finish_reason="stop")])
    agent = Agent(vibe_config, backend=backend)

    await _consume(agent, "Hello")

    assert len(backend.requests_extra_headers) > 0
    first_headers = backend.requests_extra_headers[0]
    assert first_headers is not None
    assert "x-affinity" in first_headers
    assert first_headers["x-affinity"] == agent.session_id


@pytest.mark.asyncio
async def test_passes_x_affinity_header_when_asking_an_answer_streaming(
    vibe_config: VibeConfig,
):
    """Streaming requests carry the session id as the x-affinity header too."""
    backend = FakeBackend([mock_llm_chunk(content="Response", finish_reason="stop")])
    agent = Agent(vibe_config, backend=backend, enable_streaming=True)

    await _consume(agent, "Hello")

    assert len(backend.requests_extra_headers) > 0
    first_headers = backend.requests_extra_headers[0]
    assert first_headers is not None
    assert "x-affinity" in first_headers
    assert first_headers["x-affinity"] == agent.session_id


@pytest.mark.asyncio
async def test_updates_tokens_stats_based_on_backend_response(vibe_config: VibeConfig):
    """Context token stats reflect the backend-reported usage."""
    response_chunk = mock_llm_chunk(
        content="Response",
        finish_reason="stop",
        prompt_tokens=100,
        completion_tokens=50,
    )
    backend = FakeBackend([response_chunk])
    agent = Agent(vibe_config, backend=backend)

    await _consume(agent, "Hello")

    # 100 prompt + 50 completion tokens reported by the backend.
    assert agent.stats.context_tokens == 150


@pytest.mark.asyncio
async def test_updates_tokens_stats_based_on_backend_response_streaming(
    vibe_config: VibeConfig,
):
    """Streaming mode accumulates usage from the final chunk."""
    final_chunk = mock_llm_chunk(
        content="Complete",
        finish_reason="stop",
        prompt_tokens=200,
        completion_tokens=75,
    )
    backend = FakeBackend([final_chunk])
    agent = Agent(vibe_config, backend=backend, enable_streaming=True)

    await _consume(agent, "Hello")

    # 200 prompt + 75 completion tokens reported by the backend.
    assert agent.stats.context_tokens == 275
ApprovalResponse, + CancellationReason, + get_user_cancellation_message, +) + + +class InjectBeforeMiddleware: + injectedMessage = "" + + async def before_turn(self, context: ConversationContext) -> MiddlewareResult: + "Inject a message just before the current step executes." + return MiddlewareResult( + action=MiddlewareAction.INJECT_MESSAGE, message=self.injectedMessage + ) + + async def after_turn(self, context: ConversationContext) -> MiddlewareResult: + return MiddlewareResult() + + def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None: + return None + + +def make_config( + *, + disable_logging: bool = True, + enabled_tools: list[str] | None = None, + tools: dict[str, BaseToolConfig] | None = None, +) -> VibeConfig: + cfg = VibeConfig( + session_logging=SessionLoggingConfig(enabled=not disable_logging), + auto_compact_threshold=0, + system_prompt_id="tests", + include_project_context=False, + include_prompt_detail=False, + include_model_info=False, + enabled_tools=enabled_tools or [], + tools=tools or {}, + ) + return cfg + + +@pytest.fixture +def observer_capture() -> tuple[ + list[tuple[Role, str | None]], Callable[[LLMMessage], None] +]: + observed: list[tuple[Role, str | None]] = [] + + def observer(msg: LLMMessage) -> None: + observed.append((msg.role, msg.content)) + + return observed, observer + + +@pytest.mark.asyncio +async def test_act_flushes_batched_messages_with_injection_middleware( + observer_capture, +) -> None: + observed, observer = observer_capture + + backend = FakeBackend([mock_llm_chunk(content="I can write very efficient code.")]) + agent = Agent(make_config(), message_observer=observer, backend=backend) + agent.middleware_pipeline.add(InjectBeforeMiddleware()) + + async for _ in agent.act("How can you help?"): + pass + + assert len(observed) == 3 + assert [r for r, _ in observed] == [Role.system, Role.user, Role.assistant] + assert observed[0][1] == "You are Vibe, a super useful programming assistant." 
+ # injected content should be appended to the user's message before emission + assert ( + observed[1][1] + == f"How can you help?\n\n{InjectBeforeMiddleware.injectedMessage}" + ) + assert observed[2][1] == "I can write very efficient code." + + +@pytest.mark.asyncio +async def test_stop_action_flushes_user_msg_before_returning(observer_capture) -> None: + observed, observer = observer_capture + + # max_turns=0 forces an immediate STOP on the first before_turn + backend = FakeBackend([ + mock_llm_chunk(content="My response will never reach you...") + ]) + agent = Agent( + make_config(), message_observer=observer, max_turns=0, backend=backend + ) + + async for _ in agent.act("Greet."): + pass + + assert len(observed) == 2 + # user's message should have been flushed before returning + assert [r for r, _ in observed] == [Role.system, Role.user] + assert observed[0][1] == "You are Vibe, a super useful programming assistant." + assert observed[1][1] == "Greet." + + +@pytest.mark.asyncio +async def test_act_emits_user_and_assistant_msgs(observer_capture) -> None: + observed, observer = observer_capture + + backend = FakeBackend([mock_llm_chunk(content="Pong!")]) + agent = Agent(make_config(), message_observer=observer, backend=backend) + + async for _ in agent.act("Ping?"): + pass + + assert len(observed) == 3 + assert [r for r, _ in observed] == [Role.system, Role.user, Role.assistant] + assert observed[1][1] == "Ping?" + assert observed[2][1] == "Pong!" + + +@pytest.mark.asyncio +async def test_act_yields_assistant_event_with_usage_stats() -> None: + backend = FakeBackend([mock_llm_chunk(content="Pong!")]) + agent = Agent(make_config(), backend=backend) + + events = [ev async for ev in agent.act("Ping?")] + + assert len(events) == 1 + ev = events[-1] + assert isinstance(ev, AssistantEvent) + assert ev.content == "Pong!" 
@pytest.mark.asyncio
async def test_act_streams_batched_chunks_in_order() -> None:
    """Streamed chunks are batched into ordered AssistantEvents."""
    backend = FakeBackend([
        mock_llm_chunk(content="Hello"),
        mock_llm_chunk(content=" from"),
        mock_llm_chunk(content=" Vibe"),
        mock_llm_chunk(content="! "),
        mock_llm_chunk(content="More"),
        mock_llm_chunk(content=" and"),
        mock_llm_chunk(content=" end"),
    ])
    agent = Agent(make_config(), backend=backend, enable_streaming=True)

    emitted = [event async for event in agent.act("Stream, please.")]

    assert len(emitted) == 2
    assert [event.content for event in emitted if isinstance(event, AssistantEvent)] == [
        "Hello from Vibe! More",
        " and end",
    ]
    # The final history entry holds the fully reassembled assistant reply.
    assert agent.messages[-1].role == Role.assistant
    assert agent.messages[-1].content == "Hello from Vibe! More and end"


@pytest.mark.asyncio
async def test_act_handles_streaming_with_tool_call_events_in_sequence() -> None:
    """Streaming interleaves assistant, tool-call and tool-result events."""
    tool_request = ToolCall(
        id="tc_stream",
        index=0,
        function=FunctionCall(name="todo", arguments='{"action": "read"}'),
    )
    backend = FakeBackend([
        mock_llm_chunk(content="Checking your todos."),
        mock_llm_chunk(content="", tool_calls=[tool_request]),
        mock_llm_chunk(content="", finish_reason="stop"),
        mock_llm_chunk(content="Done reviewing todos."),
    ])
    agent = Agent(
        make_config(
            enabled_tools=["todo"],
            tools={"todo": BaseToolConfig(permission=ToolPermission.ALWAYS)},
        ),
        backend=backend,
        auto_approve=True,
        enable_streaming=True,
    )

    emitted = [event async for event in agent.act("What about my todos?")]

    assert [type(event) for event in emitted] == [
        AssistantEvent,
        ToolCallEvent,
        ToolResultEvent,
        AssistantEvent,
    ]
    assert isinstance(emitted[0], AssistantEvent)
    assert emitted[0].content == "Checking your todos."
    assert isinstance(emitted[1], ToolCallEvent)
    assert emitted[1].tool_name == "todo"
    assert isinstance(emitted[2], ToolResultEvent)
    assert emitted[2].error is None
    assert emitted[2].skipped is False
    assert isinstance(emitted[3], AssistantEvent)
    assert emitted[3].content == "Done reviewing todos."
    assert agent.messages[-1].content == "Done reviewing todos."


@pytest.mark.asyncio
async def test_act_handles_tool_call_chunk_with_content() -> None:
    """Content riding on a tool-call chunk is still surfaced to the caller."""
    tool_request = ToolCall(
        id="tc_content",
        index=0,
        function=FunctionCall(name="todo", arguments='{"action": "read"}'),
    )
    backend = FakeBackend([
        mock_llm_chunk(content="Preparing "),
        mock_llm_chunk(content="todo request", tool_calls=[tool_request]),
        mock_llm_chunk(content=" complete", finish_reason="stop"),
    ])
    agent = Agent(
        make_config(
            enabled_tools=["todo"],
            tools={"todo": BaseToolConfig(permission=ToolPermission.ALWAYS)},
        ),
        backend=backend,
        auto_approve=True,
        enable_streaming=True,
    )

    emitted = [event async for event in agent.act("Check todos with content.")]

    assert [type(event) for event in emitted] == [
        AssistantEvent,
        AssistantEvent,
        ToolCallEvent,
        ToolResultEvent,
    ]
    assert isinstance(emitted[0], AssistantEvent)
    assert emitted[0].content == "Preparing todo request"
    assert isinstance(emitted[1], AssistantEvent)
    assert emitted[1].content == " complete"
    assert any(
        m.role == Role.assistant and m.content == "Preparing todo request complete"
        for m in agent.messages
    )


@pytest.mark.asyncio
async def test_act_merges_streamed_tool_call_arguments() -> None:
    """Argument fragments sharing a tool-call id are merged before dispatch."""
    first_fragment = ToolCall(
        id="tc_merge",
        index=0,
        function=FunctionCall(
            name="todo", arguments='{"action": "read", "note": "First '
        ),
    )
    second_fragment = ToolCall(
        id="tc_merge", index=0, function=FunctionCall(name="todo", arguments='part"}')
    )
    backend = FakeBackend([
        mock_llm_chunk(content="Planning: "),
        mock_llm_chunk(content="", tool_calls=[first_fragment]),
        mock_llm_chunk(content="", tool_calls=[second_fragment]),
    ])
    agent = Agent(
        make_config(
            enabled_tools=["todo"],
            tools={"todo": BaseToolConfig(permission=ToolPermission.ALWAYS)},
        ),
        backend=backend,
        auto_approve=True,
        enable_streaming=True,
    )

    emitted = [event async for event in agent.act("Merge streamed tool call args.")]

    assert [type(event) for event in emitted] == [
        AssistantEvent,
        ToolCallEvent,
        ToolResultEvent,
    ]
    call_event = emitted[1]
    assert isinstance(call_event, ToolCallEvent)
    assert call_event.tool_call_id == "tc_merge"
    call_args = cast(TodoArgs, call_event.args)
    assert call_args.action == "read"
    assert isinstance(emitted[2], ToolResultEvent)
    assert emitted[2].error is None
    assert emitted[2].skipped is False
    assistant_with_calls = next(
        m for m in agent.messages if m.role == Role.assistant and m.tool_calls
    )
    reconstructed_calls = assistant_with_calls.tool_calls or []
    assert len(reconstructed_calls) == 1
    assert reconstructed_calls[0].function.arguments == (
        '{"action": "read", "note": "First part"}'
    )
@pytest.mark.asyncio
async def test_act_handles_user_cancellation_during_streaming() -> None:
    """Denying tool approval mid-stream skips the tool and ends the turn early."""

    class CountingMiddleware(MiddlewarePipeline):
        # NOTE(review): subclasses MiddlewarePipeline without calling
        # super().__init__ — presumably only the before/after hooks matter
        # here; confirm the base class tolerates this.
        def __init__(self) -> None:
            self.before_calls = 0
            self.after_calls = 0

        async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
            self.before_calls += 1
            return MiddlewareResult()

        async def after_turn(self, context: ConversationContext) -> MiddlewareResult:
            self.after_calls += 1
            return MiddlewareResult()

        def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
            return None

    todo_tool_call = ToolCall(
        id="tc_cancel",
        index=0,
        function=FunctionCall(name="todo", arguments='{"action": "read"}'),
    )
    backend = FakeBackend([
        mock_llm_chunk(content="Preparing "),
        mock_llm_chunk(content="todo request", tool_calls=[todo_tool_call]),
        mock_llm_chunk(content="", finish_reason="stop"),
    ])
    agent = Agent(
        make_config(
            enabled_tools=["todo"],
            tools={"todo": BaseToolConfig(permission=ToolPermission.ASK)},
        ),
        backend=backend,
        auto_approve=False,
        enable_streaming=True,
    )
    middleware = CountingMiddleware()
    agent.middleware_pipeline.add(middleware)
    # Approval callback always refuses and reports a user cancellation.
    agent.set_approval_callback(
        lambda _name, _args, _id: (
            ApprovalResponse.NO,
            str(get_user_cancellation_message(CancellationReason.OPERATION_CANCELLED)),
        )
    )
    agent.interaction_logger.save_interaction = AsyncMock(return_value=None)

    events = [event async for event in agent.act("Cancel mid stream?")]

    assert [type(event) for event in events] == [
        AssistantEvent,
        ToolCallEvent,
        ToolResultEvent,
    ]
    # The turn was cut short: after_turn never ran.
    assert middleware.before_calls == 1
    assert middleware.after_calls == 0
    assert isinstance(events[-1], ToolResultEvent)
    assert events[-1].skipped is True
    assert events[-1].skip_reason is not None
    # BUG FIX: the original asserted `"" in events[-1].skip_reason`, which is
    # vacuously true for every string. Assert a non-empty reason instead.
    assert events[-1].skip_reason != ""
    assert agent.interaction_logger.save_interaction.await_count == 2


@pytest.mark.asyncio
async def test_act_flushes_and_logs_when_streaming_errors(observer_capture) -> None:
    """A backend error mid-stream still flushes pending messages and logs once."""
    observed, observer = observer_capture
    backend = FakeBackend(exception_to_raise=RuntimeError("boom in streaming"))
    agent = Agent(
        make_config(), backend=backend, message_observer=observer, enable_streaming=True
    )
    agent.interaction_logger.save_interaction = AsyncMock(return_value=None)

    with pytest.raises(RuntimeError, match="boom in streaming"):
        [_ async for _ in agent.act("Trigger stream failure")]

    # System and user messages were flushed before the failure propagated.
    assert [role for role, _ in observed] == [Role.system, Role.user]
    assert agent.interaction_logger.save_interaction.await_count == 1
name="devstral-small-latest", + provider="mistral", + alias="devstral-small", + input_price=0.1, + output_price=0.3, + ), + ModelConfig( + name="strawberry", + provider="lechat", + alias="strawberry", + input_price=2.5, + output_price=10.0, + ), + ] + providers = [ + ProviderConfig( + name="mistral", + api_base="https://api.mistral.ai/v1", + api_key_env_var="MISTRAL_API_KEY", + backend=Backend.MISTRAL, + ), + ProviderConfig( + name="lechat", + api_base="https://api.mistral.ai/v1", + api_key_env_var="LECHAT_API_KEY", + backend=Backend.MISTRAL, + ), + ] + return VibeConfig( + session_logging=SessionLoggingConfig(enabled=not disable_logging), + auto_compact_threshold=auto_compact_threshold, + system_prompt_id=system_prompt_id, + include_project_context=include_project_context, + include_prompt_detail=include_prompt_detail, + active_model=active_model, + models=models, + providers=providers, + enabled_tools=enabled_tools or [], + tools={"todo": BaseToolConfig(permission=todo_permission)}, + ) + + +@pytest.fixture +def observer_capture() -> tuple[list[LLMMessage], Callable[[LLMMessage], None]]: + observed: list[LLMMessage] = [] + + def observer(msg: LLMMessage) -> None: + observed.append(msg) + + return observed, observer + + +class TestAgentStatsHelpers: + def test_update_pricing(self) -> None: + stats = AgentStats() + stats.update_pricing(1.5, 3.0) + assert stats.input_price_per_million == 1.5 + assert stats.output_price_per_million == 3.0 + + def test_reset_context_state_preserves_cumulative(self) -> None: + stats = AgentStats( + steps=5, + session_prompt_tokens=1000, + session_completion_tokens=500, + tool_calls_succeeded=3, + tool_calls_failed=1, + context_tokens=800, + last_turn_prompt_tokens=100, + last_turn_completion_tokens=50, + last_turn_duration=1.5, + tokens_per_second=33.3, + input_price_per_million=0.4, + output_price_per_million=2.0, + ) + + stats.reset_context_state() + + assert stats.steps == 5 + assert stats.session_prompt_tokens == 1000 + assert 
stats.session_completion_tokens == 500 + assert stats.tool_calls_succeeded == 3 + assert stats.tool_calls_failed == 1 + assert stats.input_price_per_million == 0.4 + assert stats.output_price_per_million == 2.0 + + assert stats.context_tokens == 0 + assert stats.last_turn_prompt_tokens == 0 + assert stats.last_turn_completion_tokens == 0 + assert stats.last_turn_duration == 0.0 + assert stats.tokens_per_second == 0.0 + + def test_session_cost_computed_from_current_pricing(self) -> None: + stats = AgentStats( + session_prompt_tokens=1_000_000, + session_completion_tokens=500_000, + input_price_per_million=1.0, + output_price_per_million=2.0, + ) + # Cost = 1M * $1/M + 0.5M * $2/M = $1 + $1 = $2 + assert stats.session_cost == 2.0 + + stats.update_pricing(2.0, 4.0) + # Cost = 1M * $2/M + 0.5M * $4/M = $2 + $2 = $4 + assert stats.session_cost == 4.0 + + +class TestReloadPreservesStats: + @pytest.mark.asyncio + async def test_reload_preserves_session_tokens(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="First response", finish_reason="stop") + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Hello"): + pass + + old_session_prompt = agent.stats.session_prompt_tokens + old_session_completion = agent.stats.session_completion_tokens + assert old_session_prompt > 0 + assert old_session_completion > 0 + + await agent.reload_with_initial_messages() + + assert agent.stats.session_prompt_tokens == old_session_prompt + assert agent.stats.session_completion_tokens == old_session_completion + + @pytest.mark.asyncio + async def test_reload_preserves_tool_call_stats(self) -> None: + backend = FakeBackend([ + mock_llm_chunk( + content="Calling tool", + tool_calls=[ + ToolCall( + id="tc1", + function=FunctionCall( + name="todo", arguments='{"action": "read"}' + ), + ) + ], + ), + mock_llm_chunk(content="Done", finish_reason="stop"), + ]) + config = make_config(enabled_tools=["todo"]) + agent = Agent(config, auto_approve=True, 
backend=backend) + + async for _ in agent.act("Check todos"): + pass + + assert agent.stats.tool_calls_succeeded == 1 + assert agent.stats.tool_calls_agreed == 1 + + await agent.reload_with_initial_messages() + + assert agent.stats.tool_calls_succeeded == 1 + assert agent.stats.tool_calls_agreed == 1 + + @pytest.mark.asyncio + async def test_reload_preserves_steps(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="R1", finish_reason="stop"), + mock_llm_chunk(content="R2", finish_reason="stop"), + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("First"): + pass + async for _ in agent.act("Second"): + pass + + old_steps = agent.stats.steps + assert old_steps >= 2 + + await agent.reload_with_initial_messages() + + assert agent.stats.steps == old_steps + + @pytest.mark.asyncio + async def test_reload_preserves_context_tokens_when_messages_preserved( + self, + ) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + agent = Agent(make_config(), backend=backend) + [_ async for _ in agent.act("Hello")] + assert agent.stats.context_tokens > 0 + initial_context_tokens = agent.stats.context_tokens + assert len(agent.messages) > 1 + + await agent.reload_with_initial_messages() + + assert len(agent.messages) > 1 + assert agent.stats.context_tokens == initial_context_tokens + + @pytest.mark.asyncio + async def test_reload_resets_context_tokens_when_no_messages(self) -> None: + backend = FakeBackend([]) + agent = Agent(make_config(), backend=backend) + assert len(agent.messages) == 1 + assert agent.stats.context_tokens == 0 + + await agent.reload_with_initial_messages() + + assert len(agent.messages) == 1 + assert agent.stats.context_tokens == 0 + + @pytest.mark.asyncio + async def test_reload_resets_context_tokens_when_system_prompt_changes( + self, + ) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + config1 = 
make_config(system_prompt_id="tests") + config2 = make_config(system_prompt_id="cli") + agent = Agent(config1, backend=backend) + [_ async for _ in agent.act("Hello")] + assert agent.stats.context_tokens > 0 + assert len(agent.messages) > 1 + + await agent.reload_with_initial_messages(config=config2) + + assert len(agent.messages) > 1 + assert agent.stats.context_tokens == 0 + + @pytest.mark.asyncio + async def test_reload_updates_pricing_from_new_model(self, monkeypatch) -> None: + monkeypatch.setenv("LECHAT_API_KEY", "mock-key") + + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + config_mistral = make_config(active_model="devstral-latest") + agent = Agent(config_mistral, backend=backend) + + async for _ in agent.act("Hello"): + pass + + assert agent.stats.input_price_per_million == 0.4 + assert agent.stats.output_price_per_million == 2.0 + + config_other = make_config(active_model="strawberry") + await agent.reload_with_initial_messages(config=config_other) + + assert agent.stats.input_price_per_million == 2.5 + assert agent.stats.output_price_per_million == 10.0 + + @pytest.mark.asyncio + async def test_reload_accumulates_tokens_across_configs(self, monkeypatch) -> None: + monkeypatch.setenv("LECHAT_API_KEY", "mock-key") + + backend = FakeBackend([ + mock_llm_chunk(content="First", finish_reason="stop"), + mock_llm_chunk(content="After reload", finish_reason="stop"), + ]) + config1 = make_config(active_model="devstral-latest") + agent = Agent(config1, backend=backend) + + async for _ in agent.act("Hello"): + pass + + tokens_after_first = ( + agent.stats.session_prompt_tokens + agent.stats.session_completion_tokens + ) + + config2 = make_config(active_model="strawberry") + await agent.reload_with_initial_messages(config=config2) + + async for _ in agent.act("Continue"): + pass + + tokens_after_second = ( + agent.stats.session_prompt_tokens + agent.stats.session_completion_tokens + ) + assert tokens_after_second > 
tokens_after_first + + +class TestReloadPreservesMessages: + @pytest.mark.asyncio + async def test_reload_preserves_conversation_messages(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Hello"): + pass + + assert len(agent.messages) == 3 + old_user_content = agent.messages[1].content + old_assistant_content = agent.messages[2].content + + await agent.reload_with_initial_messages() + + assert len(agent.messages) == 3 + assert agent.messages[0].role == Role.system + assert agent.messages[1].role == Role.user + assert agent.messages[1].content == old_user_content + assert agent.messages[2].role == Role.assistant + assert agent.messages[2].content == old_assistant_content + + @pytest.mark.asyncio + async def test_reload_updates_system_prompt_preserves_rest(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + config1 = make_config(system_prompt_id="tests") + agent = Agent(config1, backend=backend) + + async for _ in agent.act("Hello"): + pass + + old_system = agent.messages[0].content + old_user = agent.messages[1].content + + config2 = make_config(system_prompt_id="cli") + await agent.reload_with_initial_messages(config=config2) + + assert agent.messages[0].content != old_system + assert agent.messages[1].content == old_user + + @pytest.mark.asyncio + async def test_reload_with_no_messages_stays_empty(self) -> None: + backend = FakeBackend([]) + agent = Agent(make_config(), backend=backend) + + assert len(agent.messages) == 1 + + await agent.reload_with_initial_messages() + + assert len(agent.messages) == 1 + assert agent.messages[0].role == Role.system + + @pytest.mark.asyncio + async def test_reload_notifies_observer_with_all_messages( + self, observer_capture + ) -> None: + observed, observer = observer_capture + backend = FakeBackend([ + mock_llm_chunk(content="Response", 
finish_reason="stop") + ]) + agent = Agent(make_config(), message_observer=observer, backend=backend) + + async for _ in agent.act("Hello"): + pass + + observed.clear() + + await agent.reload_with_initial_messages() + + assert len(observed) == 3 + assert observed[0].role == Role.system + assert observed[1].role == Role.user + assert observed[2].role == Role.assistant + + +class TestCompactStatsHandling: + @pytest.mark.asyncio + async def test_compact_preserves_cumulative_stats(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="First response", finish_reason="stop"), + mock_llm_chunk(content="", finish_reason="stop"), + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Build something"): + pass + + tokens_before_compact = agent.stats.session_prompt_tokens + completions_before = agent.stats.session_completion_tokens + steps_before = agent.stats.steps + + await agent.compact() + + # Cumulative stats include the compact turn + assert agent.stats.session_prompt_tokens > tokens_before_compact + assert agent.stats.session_completion_tokens > completions_before + assert agent.stats.steps > steps_before + + @pytest.mark.asyncio + async def test_compact_updates_context_tokens(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Long response " * 100, finish_reason="stop"), + mock_llm_chunk(content="", finish_reason="stop"), + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Do something complex"): + pass + + context_before = agent.stats.context_tokens + + await agent.compact() + + assert agent.stats.context_tokens < context_before + + @pytest.mark.asyncio + async def test_compact_preserves_tool_call_stats(self) -> None: + backend = FakeBackend([ + mock_llm_chunk( + content="Using tool", + tool_calls=[ + ToolCall( + id="tc1", + function=FunctionCall( + name="todo", arguments='{"action": "read"}' + ), + ) + ], + ), + mock_llm_chunk(content="Done", finish_reason="stop"), + 
mock_llm_chunk(content="", finish_reason="stop"), + ]) + config = make_config(enabled_tools=["todo"]) + agent = Agent(config, auto_approve=True, backend=backend) + + async for _ in agent.act("Check todos"): + pass + + assert agent.stats.tool_calls_succeeded == 1 + + await agent.compact() + + assert agent.stats.tool_calls_succeeded == 1 + + @pytest.mark.asyncio + async def test_compact_resets_session_id(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Long response " * 100, finish_reason="stop"), + mock_llm_chunk(content="", finish_reason="stop"), + ]) + agent = Agent(make_config(disable_logging=False), backend=backend) + + original_session_id = agent.session_id + original_logger_session_id = agent.interaction_logger.session_id + + assert agent.session_id == original_logger_session_id + + async for _ in agent.act("Do something complex"): + pass + + await agent.compact() + + assert agent.session_id != original_session_id + assert agent.session_id == agent.interaction_logger.session_id + + +class TestAutoCompactIntegration: + @pytest.mark.asyncio + async def test_auto_compact_triggers_and_preserves_stats(self) -> None: + observed: list[tuple[Role, str | None]] = [] + + def observer(msg: LLMMessage) -> None: + observed.append((msg.role, msg.content)) + + backend = FakeBackend([ + mock_llm_chunk(content="", finish_reason="stop"), + mock_llm_chunk(content="", finish_reason="stop"), + ]) + cfg = VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), + auto_compact_threshold=1, + ) + agent = Agent(cfg, message_observer=observer, backend=backend) + agent.stats.context_tokens = 2 + + events = [ev async for ev in agent.act("Hello")] + + assert len(events) == 3 + assert isinstance(events[0], CompactStartEvent) + assert isinstance(events[1], CompactEndEvent) + assert isinstance(events[2], AssistantEvent) + + start: CompactStartEvent = events[0] + end: CompactEndEvent = events[1] + final: AssistantEvent = events[2] + + assert 
start.current_context_tokens == 2 + assert start.threshold == 1 + assert end.old_context_tokens == 2 + assert end.new_context_tokens >= 1 + assert final.content == "" + + roles = [r for r, _ in observed] + assert roles == [Role.system, Role.user, Role.assistant] + assert ( + observed[1][1] is not None + and "Last request from user was: Hello" in observed[1][1] + ) + + +class TestClearHistoryFullReset: + @pytest.mark.asyncio + async def test_clear_history_fully_resets_stats(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Hello"): + pass + + assert agent.stats.session_prompt_tokens > 0 + assert agent.stats.steps > 0 + + await agent.clear_history() + + assert agent.stats.session_prompt_tokens == 0 + assert agent.stats.session_completion_tokens == 0 + assert agent.stats.steps == 0 + + @pytest.mark.asyncio + async def test_clear_history_preserves_pricing(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + config = make_config(input_price=0.4, output_price=2.0) + agent = Agent(config, backend=backend) + + async for _ in agent.act("Hello"): + pass + + await agent.clear_history() + + assert agent.stats.input_price_per_million == 0.4 + assert agent.stats.output_price_per_million == 2.0 + + @pytest.mark.asyncio + async def test_clear_history_removes_messages(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Hello"): + pass + + assert len(agent.messages) == 3 + + await agent.clear_history() + + assert len(agent.messages) == 1 + assert agent.messages[0].role == Role.system + + @pytest.mark.asyncio + async def test_clear_history_resets_session_id(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + agent = 
Agent(make_config(disable_logging=False), backend=backend) + + original_session_id = agent.session_id + original_logger_session_id = agent.interaction_logger.session_id + + assert agent.session_id == original_logger_session_id + + async for _ in agent.act("Hello"): + pass + + await agent.clear_history() + + assert agent.session_id != original_session_id + assert agent.session_id == agent.interaction_logger.session_id + + +class TestStatsEdgeCases: + @pytest.mark.asyncio + async def test_session_cost_approximation_on_model_change( + self, monkeypatch + ) -> None: + monkeypatch.setenv("LECHAT_API_KEY", "mock-key") + + backend = FakeBackend([ + mock_llm_chunk(content="Response", finish_reason="stop") + ]) + config1 = make_config(active_model="devstral-latest") + agent = Agent(config1, backend=backend) + + async for _ in agent.act("Hello"): + pass + + cost_before = agent.stats.session_cost + + config2 = make_config(active_model="strawberry") + await agent.reload_with_initial_messages(config=config2) + + cost_after = agent.stats.session_cost + + assert cost_after > cost_before + + @pytest.mark.asyncio + async def test_multiple_reloads_accumulate_correctly(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="R1", finish_reason="stop"), + mock_llm_chunk(content="R2", finish_reason="stop"), + mock_llm_chunk(content="R3", finish_reason="stop"), + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("First"): + pass + tokens1 = agent.stats.session_total_llm_tokens + + await agent.reload_with_initial_messages() + async for _ in agent.act("Second"): + pass + tokens2 = agent.stats.session_total_llm_tokens + + await agent.reload_with_initial_messages() + async for _ in agent.act("Third"): + pass + tokens3 = agent.stats.session_total_llm_tokens + + assert tokens1 < tokens2 < tokens3 + + @pytest.mark.asyncio + async def test_compact_then_reload_preserves_both(self) -> None: + backend = FakeBackend([ + mock_llm_chunk(content="Initial 
response", finish_reason="stop"), + mock_llm_chunk(content="", finish_reason="stop"), + mock_llm_chunk(content="After reload", finish_reason="stop"), + ]) + agent = Agent(make_config(), backend=backend) + + async for _ in agent.act("Build something"): + pass + + await agent.compact() + tokens_after_compact = agent.stats.session_prompt_tokens + + await agent.reload_with_initial_messages() + + assert agent.stats.session_prompt_tokens == tokens_after_compact + + async for _ in agent.act("Continue"): + pass + + assert agent.stats.session_prompt_tokens > tokens_after_compact + + @pytest.mark.asyncio + async def test_reload_without_config_preserves_current(self) -> None: + backend = FakeBackend([]) + original_config = make_config(active_model="devstral-latest") + agent = Agent(original_config, backend=backend) + + await agent.reload_with_initial_messages(config=None) + + assert agent.config.active_model == "devstral-latest" + + @pytest.mark.asyncio + async def test_reload_with_new_config_updates_it(self) -> None: + backend = FakeBackend([]) + original_config = make_config(active_model="devstral-latest") + agent = Agent(original_config, backend=backend) + + new_config = make_config(active_model="devstral-small") + await agent.reload_with_initial_messages(config=new_config) + + assert agent.config.active_model == "devstral-small" diff --git a/tests/test_agent_tool_call.py b/tests/test_agent_tool_call.py new file mode 100644 index 0000000..2230ad6 --- /dev/null +++ b/tests/test_agent_tool_call.py @@ -0,0 +1,477 @@ +from __future__ import annotations + +import asyncio +import json +from typing import Any + +import pytest + +from tests.mock.utils import mock_llm_chunk +from tests.stubs.fake_backend import FakeBackend +from tests.stubs.fake_tool import FakeTool +from vibe.core.agent import Agent +from vibe.core.config import SessionLoggingConfig, VibeConfig +from vibe.core.tools.base import BaseToolConfig, ToolPermission +from vibe.core.tools.builtins.todo import TodoItem 
+from vibe.core.types import ( + AssistantEvent, + BaseEvent, + FunctionCall, + LLMMessage, + Role, + SyncApprovalCallback, + ToolCall, + ToolCallEvent, + ToolResultEvent, +) +from vibe.core.utils import ApprovalResponse + + +async def act_and_collect_events(agent: Agent, prompt: str) -> list[BaseEvent]: + return [ev async for ev in agent.act(prompt)] + + +def make_config(todo_permission: ToolPermission = ToolPermission.ALWAYS) -> VibeConfig: + return VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), + auto_compact_threshold=0, + enabled_tools=["todo"], + tools={"todo": BaseToolConfig(permission=todo_permission)}, + system_prompt_id="tests", + include_project_context=False, + include_prompt_detail=False, + ) + + +def make_todo_tool_call(call_id: str, action: str = "read") -> ToolCall: + return ToolCall( + id=call_id, + function=FunctionCall(name="todo", arguments=f'{{"action": "{action}"}}'), + ) + + +def make_agent( + *, + auto_approve: bool = True, + todo_permission: ToolPermission = ToolPermission.ALWAYS, + backend: FakeBackend, + approval_callback: SyncApprovalCallback | None = None, +) -> Agent: + agent = Agent( + make_config(todo_permission=todo_permission), + auto_approve=auto_approve, + backend=backend, + ) + if approval_callback: + agent.set_approval_callback(approval_callback) + return agent + + +@pytest.mark.asyncio +async def test_single_tool_call_executes_under_auto_approve() -> None: + mocked_tool_call_id = "call_1" + tool_call = make_todo_tool_call(mocked_tool_call_id) + backend = FakeBackend([ + mock_llm_chunk(content="Let me check your todos.", tool_calls=[tool_call]), + mock_llm_chunk(content="I retrieved 0 todos.", finish_reason="stop"), + ]) + agent = make_agent(auto_approve=True, backend=backend) + + events = await act_and_collect_events(agent, "What's my todo list?") + + assert [type(e) for e in events] == [ + AssistantEvent, + ToolCallEvent, + ToolResultEvent, + AssistantEvent, + ] + assert isinstance(events[0], 
AssistantEvent) + assert events[0].content == "Let me check your todos." + assert isinstance(events[1], ToolCallEvent) + assert events[1].tool_name == "todo" + assert isinstance(events[2], ToolResultEvent) + assert events[2].error is None + assert events[2].skipped is False + assert events[2].result is not None + assert isinstance(events[3], AssistantEvent) + assert events[3].content == "I retrieved 0 todos." + # check conversation history + tool_msgs = [m for m in agent.messages if m.role == Role.tool] + assert len(tool_msgs) == 1 + assert tool_msgs[-1].tool_call_id == mocked_tool_call_id + assert "total_count" in (tool_msgs[-1].content or "") + + +@pytest.mark.asyncio +async def test_tool_call_requires_approval_if_not_auto_approved() -> None: + agent = make_agent( + auto_approve=False, + todo_permission=ToolPermission.ASK, + backend=FakeBackend([ + mock_llm_chunk( + content="Let me check your todos.", + tool_calls=[make_todo_tool_call("call_2")], + ), + mock_llm_chunk( + content="I cannot execute the tool without approval.", + finish_reason="stop", + ), + ]), + ) + + events = await act_and_collect_events(agent, "What's my todo list?") + + assert isinstance(events[1], ToolCallEvent) + assert events[1].tool_name == "todo" + assert isinstance(events[2], ToolResultEvent) + assert events[2].skipped is True + assert events[2].error is None + assert events[2].result is None + assert events[2].skip_reason is not None + assert "not permitted" in events[2].skip_reason.lower() + assert isinstance(events[3], AssistantEvent) + assert events[3].content == "I cannot execute the tool without approval." 
+ assert agent.stats.tool_calls_rejected == 1 + assert agent.stats.tool_calls_agreed == 0 + assert agent.stats.tool_calls_succeeded == 0 + + +@pytest.mark.asyncio +async def test_tool_call_approved_by_callback() -> None: + def approval_callback( + _tool_name: str, _args: dict[str, Any], _tool_call_id: str + ) -> tuple[str, str | None]: + return (ApprovalResponse.YES, None) + + agent = make_agent( + auto_approve=False, + todo_permission=ToolPermission.ASK, + approval_callback=approval_callback, + backend=FakeBackend([ + mock_llm_chunk( + content="Let me check your todos.", + tool_calls=[make_todo_tool_call("call_3")], + ), + mock_llm_chunk(content="I retrieved 0 todos.", finish_reason="stop"), + ]), + ) + + events = await act_and_collect_events(agent, "What's my todo list?") + + assert isinstance(events[2], ToolResultEvent) + assert events[2].skipped is False + assert events[2].error is None + assert events[2].result is not None + assert agent.stats.tool_calls_agreed == 1 + assert agent.stats.tool_calls_rejected == 0 + assert agent.stats.tool_calls_succeeded == 1 + + +@pytest.mark.asyncio +async def test_tool_call_rejected_when_auto_approve_disabled_and_rejected_by_callback() -> ( + None +): + custom_feedback = "User declined tool execution" + + def approval_callback( + _tool_name: str, _args: dict[str, Any], _tool_call_id: str + ) -> tuple[str, str | None]: + return (ApprovalResponse.NO, custom_feedback) + + agent = make_agent( + auto_approve=False, + todo_permission=ToolPermission.ASK, + approval_callback=approval_callback, + backend=FakeBackend([ + mock_llm_chunk( + content="Let me check your todos.", + tool_calls=[make_todo_tool_call("call_4")], + ), + mock_llm_chunk( + content="Understood, I won't check the todos.", finish_reason="stop" + ), + ]), + ) + + events = await act_and_collect_events(agent, "What's my todo list?") + + assert isinstance(events[2], ToolResultEvent) + assert events[2].skipped is True + assert events[2].error is None + assert 
events[2].result is None + assert events[2].skip_reason == custom_feedback + assert agent.stats.tool_calls_rejected == 1 + assert agent.stats.tool_calls_agreed == 0 + assert agent.stats.tool_calls_succeeded == 0 + + +@pytest.mark.asyncio +async def test_tool_call_skipped_when_permission_is_never() -> None: + agent = make_agent( + auto_approve=False, + todo_permission=ToolPermission.NEVER, + backend=FakeBackend([ + mock_llm_chunk( + content="Let me check your todos.", + tool_calls=[make_todo_tool_call("call_never")], + ), + mock_llm_chunk(content="Tool is disabled.", finish_reason="stop"), + ]), + ) + + events = await act_and_collect_events(agent, "What's my todo list?") + + assert isinstance(events[2], ToolResultEvent) + assert events[2].skipped is True + assert events[2].error is None + assert events[2].result is None + assert events[2].skip_reason is not None + assert "permanently disabled" in events[2].skip_reason.lower() + tool_msgs = [m for m in agent.messages if m.role == Role.tool and m.name == "todo"] + assert len(tool_msgs) == 1 + assert tool_msgs[0].name == "todo" + assert events[2].skip_reason in (tool_msgs[-1].content or "") + assert agent.stats.tool_calls_rejected == 1 + assert agent.stats.tool_calls_agreed == 0 + assert agent.stats.tool_calls_succeeded == 0 + + +@pytest.mark.asyncio +async def test_approval_always_flips_auto_approve_for_subsequent_calls() -> None: + callback_invocations = [] + + def approval_callback( + tool_name: str, _args: dict[str, Any], _tool_call_id: str + ) -> tuple[str, str | None]: + callback_invocations.append(tool_name) + return (ApprovalResponse.ALWAYS, None) + + agent = make_agent( + auto_approve=False, + todo_permission=ToolPermission.ASK, + approval_callback=approval_callback, + backend=FakeBackend([ + mock_llm_chunk( + content="First check.", tool_calls=[make_todo_tool_call("call_first")] + ), + mock_llm_chunk(content="First done.", finish_reason="stop"), + mock_llm_chunk( + content="Second check.", 
tool_calls=[make_todo_tool_call("call_second")] + ), + mock_llm_chunk(content="Second done.", finish_reason="stop"), + ]), + ) + + events1 = await act_and_collect_events(agent, "First request") + events2 = await act_and_collect_events(agent, "Second request") + + assert agent.auto_approve is True + assert len(callback_invocations) == 1 + assert callback_invocations[0] == "todo" + assert isinstance(events1[2], ToolResultEvent) + assert events1[2].skipped is False + assert events1[2].result is not None + assert isinstance(events2[2], ToolResultEvent) + assert events2[2].skipped is False + assert events2[2].result is not None + assert agent.stats.tool_calls_rejected == 0 + assert agent.stats.tool_calls_succeeded == 2 + + +@pytest.mark.asyncio +async def test_tool_call_with_invalid_action() -> None: + tool_call = ToolCall( + id="call_5", + function=FunctionCall(name="todo", arguments='{"action": "invalid_action"}'), + ) + agent = make_agent( + auto_approve=True, + backend=FakeBackend([ + mock_llm_chunk(content="Let me check your todos.", tool_calls=[tool_call]), + mock_llm_chunk( + content="I encountered an error with the action.", finish_reason="stop" + ), + ]), + ) + + events = await act_and_collect_events(agent, "What's my todo list?") + + assert isinstance(events[2], ToolResultEvent) + assert events[2].error is not None + assert events[2].result is None + assert "tool_error" in events[2].error.lower() + assert agent.stats.tool_calls_failed == 1 + + +@pytest.mark.asyncio +async def test_tool_call_with_duplicate_todo_ids() -> None: + duplicate_todos = [ + TodoItem(id="duplicate", content="Task 1"), + TodoItem(id="duplicate", content="Task 2"), + ] + tool_call = ToolCall( + id="call_6", + function=FunctionCall( + name="todo", + arguments=json.dumps({ + "action": "write", + "todos": [t.model_dump() for t in duplicate_todos], + }), + ), + ) + agent = make_agent( + auto_approve=True, + backend=FakeBackend([ + mock_llm_chunk(content="Let me write todos.", 
tool_calls=[tool_call]), + mock_llm_chunk( + content="I couldn't write todos with duplicate IDs.", + finish_reason="stop", + ), + ]), + ) + + events = await act_and_collect_events(agent, "Add todos") + + assert isinstance(events[2], ToolResultEvent) + assert events[2].error is not None + assert events[2].result is None + assert "unique" in events[2].error.lower() + assert agent.stats.tool_calls_failed == 1 + + +@pytest.mark.asyncio +async def test_tool_call_with_exceeding_max_todos() -> None: + many_todos = [TodoItem(id=f"todo_{i}", content=f"Task {i}") for i in range(150)] + tool_call = ToolCall( + id="call_7", + function=FunctionCall( + name="todo", + arguments=json.dumps({ + "action": "write", + "todos": [t.model_dump() for t in many_todos], + }), + ), + ) + agent = make_agent( + auto_approve=True, + backend=FakeBackend([ + mock_llm_chunk(content="Let me write todos.", tool_calls=[tool_call]), + mock_llm_chunk( + content="I couldn't write that many todos.", finish_reason="stop" + ), + ]), + ) + + events = await act_and_collect_events(agent, "Add todos") + + assert isinstance(events[2], ToolResultEvent) + assert events[2].error is not None + assert events[2].result is None + assert "100" in events[2].error + assert agent.stats.tool_calls_failed == 1 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "exception_class", + [ + pytest.param(KeyboardInterrupt, id="keyboard_interrupt"), + pytest.param(asyncio.CancelledError, id="asyncio_cancelled"), + ], +) +async def test_tool_call_can_be_interrupted( + exception_class: type[BaseException], +) -> None: + tool_call = ToolCall( + id="call_8", function=FunctionCall(name="stub_tool", arguments="{}") + ) + config = VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), + auto_compact_threshold=0, + enabled_tools=["stub_tool"], + ) + agent = Agent( + config, + auto_approve=True, + backend=FakeBackend([ + mock_llm_chunk(content="Let me use the tool.", tool_calls=[tool_call]), + mock_llm_chunk(content="Tool 
execution completed.", finish_reason="stop"), + ]), + ) + # no dependency injection available => monkey patch + agent.tool_manager._available["stub_tool"] = FakeTool + stub_tool_instance = agent.tool_manager.get("stub_tool") + assert isinstance(stub_tool_instance, FakeTool) + stub_tool_instance._exception_to_raise = exception_class() + + events: list[BaseEvent] = [] + with pytest.raises(exception_class): + async for ev in agent.act("Execute tool"): + events.append(ev) + + tool_result_event = next( + (e for e in events if isinstance(e, ToolResultEvent)), None + ) + assert tool_result_event is not None + assert tool_result_event.error is not None + assert "execution interrupted by user" in tool_result_event.error.lower() + + +@pytest.mark.asyncio +async def test_fill_missing_tool_responses_inserts_placeholders() -> None: + agent = Agent( + make_config(), + auto_approve=True, + backend=FakeBackend([mock_llm_chunk(content="ok", finish_reason="stop")]), + ) + tool_calls_messages = [ + ToolCall( + id="tc1", function=FunctionCall(name="todo", arguments='{"action": "read"}') + ), + ToolCall( + id="tc2", function=FunctionCall(name="todo", arguments='{"action": "read"}') + ), + ] + assistant_msg = LLMMessage( + role=Role.assistant, content="Calling tools...", tool_calls=tool_calls_messages + ) + agent.messages = [ + agent.messages[0], + assistant_msg, + # only one tool responded: the second is missing + LLMMessage( + role=Role.tool, tool_call_id="tc1", name="todo", content="Retrieved 0 todos" + ), + ] + + await act_and_collect_events(agent, "Proceed") + + tool_msgs = [m for m in agent.messages if m.role == Role.tool] + assert any(m.tool_call_id == "tc2" for m in tool_msgs) + # find placeholder message for tc2 + placeholder = next(m for m in tool_msgs if m.tool_call_id == "tc2") + assert placeholder.name == "todo" + assert ( + placeholder.content + == "Tool execution interrupted - no response available" + ) + + +@pytest.mark.asyncio +async def 
test_ensure_assistant_after_tool_appends_understood() -> None: + agent = Agent( + make_config(), + auto_approve=True, + backend=FakeBackend([mock_llm_chunk(content="ok", finish_reason="stop")]), + ) + tool_msg = LLMMessage( + role=Role.tool, tool_call_id="tc_z", name="todo", content="Done" + ) + agent.messages = [agent.messages[0], tool_msg] + + await act_and_collect_events(agent, "Next") + + # find the seeded tool message and ensure the next message is "Understood." + idx = next(i for i, m in enumerate(agent.messages) if m.role == Role.tool) + assert agent.messages[idx + 1].role == Role.assistant + assert agent.messages[idx + 1].content == "Understood." diff --git a/tests/test_cli_programmatic_preload.py b/tests/test_cli_programmatic_preload.py new file mode 100644 index 0000000..8e2c78a --- /dev/null +++ b/tests/test_cli_programmatic_preload.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +import pytest + +from tests.mock.mock_backend_factory import mock_backend_factory +from tests.mock.utils import mock_llm_chunk +from tests.stubs.fake_backend import FakeBackend +from vibe.core import run_programmatic +from vibe.core.config import Backend, SessionLoggingConfig, VibeConfig +from vibe.core.types import LLMMessage, OutputFormat, Role + + +class SpyStreamingFormatter: + def __init__(self) -> None: + self.emitted: list[tuple[Role, str | None]] = [] + + def on_message_added(self, message: LLMMessage) -> None: + self.emitted.append((message.role, message.content)) + + def on_event(self, _event) -> None: # No-op for this test + pass + + def finalize(self) -> str | None: + return None + + +def test_run_programmatic_preload_streaming_is_batched( + monkeypatch: pytest.MonkeyPatch, +) -> None: + spy = SpyStreamingFormatter() + monkeypatch.setattr( + "vibe.core.programmatic.create_formatter", lambda *_args, **_kwargs: spy + ) + + with mock_backend_factory( + Backend.MISTRAL, + lambda provider, **kwargs: FakeBackend([ + mock_llm_chunk( + content="Decorators are 
wrappers that modify function behavior.", + finish_reason="stop", + ) + ]), + ): + cfg = VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), + system_prompt_id="tests", + include_project_context=False, + include_prompt_detail=False, + include_model_info=False, + ) + + previous = [ + LLMMessage( + role=Role.system, content="This system message should be ignored." + ), + LLMMessage( + role=Role.user, content="Previously, you told me about decorators." + ), + LLMMessage( + role=Role.assistant, + content="Sure, decorators allow you to wrap functions.", + ), + ] + + run_programmatic( + config=cfg, + prompt="Can you summarize what decorators are?", + output_format=OutputFormat.STREAMING, + previous_messages=previous, + ) + + roles = [r for r, _ in spy.emitted] + assert roles == [ + Role.system, + Role.user, + Role.assistant, + Role.user, + Role.assistant, + ] + assert ( + spy.emitted[0][1] == "You are Vibe, a super useful programming assistant." + ) + assert spy.emitted[1][1] == "Previously, you told me about decorators." + assert spy.emitted[2][1] == "Sure, decorators allow you to wrap functions." + assert spy.emitted[3][1] == "Can you summarize what decorators are?" + assert ( + spy.emitted[4][1] + == "Decorators are wrappers that modify function behavior." 
+ ) + + +def test_run_programmatic_ignores_system_messages_in_previous( + monkeypatch: pytest.MonkeyPatch, +) -> None: + spy = SpyStreamingFormatter() + monkeypatch.setattr( + "vibe.core.programmatic.create_formatter", lambda *_args, **_kwargs: spy + ) + + with mock_backend_factory( + Backend.MISTRAL, + lambda provider, **kwargs: FakeBackend([mock_llm_chunk(content="Understood.")]), + ): + cfg = VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), + system_prompt_id="tests", + include_project_context=False, + include_prompt_detail=False, + include_model_info=False, + ) + + run_programmatic( + config=cfg, + prompt="Let's move on to practical examples.", + output_format=OutputFormat.STREAMING, + previous_messages=[ + LLMMessage( + role=Role.system, + content="First system message that should be ignored.", + ), + LLMMessage(role=Role.user, content="Continue our previous discussion."), + LLMMessage( + role=Role.system, + content="Second system message that should be ignored.", + ), + ], + auto_approve=True, + ) + + roles = [r for r, _ in spy.emitted] + assert roles == [Role.system, Role.user, Role.user, Role.assistant] + assert ( + spy.emitted[0][1] == "You are Vibe, a super useful programming assistant." + ) + assert spy.emitted[1][1] == "Continue our previous discussion." + assert spy.emitted[2][1] == "Let's move on to practical examples." + assert spy.emitted[3][1] == "Understood." 
diff --git a/tests/test_history_manager.py b/tests/test_history_manager.py new file mode 100644 index 0000000..71d5545 --- /dev/null +++ b/tests/test_history_manager.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import json +from pathlib import Path + +from vibe.cli.history_manager import HistoryManager + + +def test_history_manager_normalizes_loaded_entries_like_numbers_to_strings( + tmp_path: Path, +) -> None: + # ideally, we would not use real I/O; but this test is a quick bugfix, thus it + # does not intend to refactor the HistoryManager + history_file = tmp_path / "history.jsonl" + history_entries = ["hello", 123] + history_file.write_text( + "\n".join(json.dumps(entry) for entry in history_entries) + "\n", + encoding="utf-8", + ) + manager = HistoryManager(history_file) + + result = manager.get_previous(current_input="", prefix="1") + + assert result == "123" + + +def test_history_manager_retains_a_fixed_number_of_entries(tmp_path: Path) -> None: + history_file = tmp_path / "history.jsonl" + manager = HistoryManager(history_file, max_entries=3) + + manager.add("first") + manager.add("second") + manager.add("third") + manager.add("fourth") + + reloaded = HistoryManager(history_file) + + assert reloaded.get_previous(current_input="", prefix="") == "fourth" + assert reloaded.get_previous(current_input="", prefix="") == "third" + assert reloaded.get_previous(current_input="", prefix="") == "second" + # "first" is not proposed as we defined number of entries to 3 + assert reloaded.get_previous(current_input="", prefix="") is None + + +def test_history_manager_filters_invalid_and_duplicated_entries(tmp_path: Path) -> None: + history_file = tmp_path / "history.jsonl" + manager = HistoryManager(history_file, max_entries=5) + manager.add("") # empty + manager.add(" ") # is trimmed + manager.add("first") + manager.add("second") + manager.add("second") # duplicate + manager.add("third") + + reloaded = HistoryManager(history_file) + + assert 
reloaded.get_previous(current_input="", prefix="") == "third" + assert reloaded.get_previous(current_input="", prefix="") == "second" + assert reloaded.get_previous(current_input="", prefix="") == "first" + assert reloaded.get_previous(current_input="", prefix="") is None + assert reloaded.get_previous(current_input="", prefix="") is None + + +def test_history_manager_filters_commands(tmp_path: Path) -> None: + history_file = tmp_path / "history.jsonl" + manager = HistoryManager(history_file, max_entries=5) + manager.add("first") + manager.add("/skip") + + reloaded = HistoryManager(history_file) + + assert reloaded.get_previous(current_input="", prefix="/") is None + assert reloaded.get_previous(current_input="", prefix="") == "first" + assert reloaded.get_previous(current_input="", prefix="") is None + + +def test_history_manager_allows_navigation_round_trip(tmp_path: Path) -> None: + history_file = tmp_path / "history.jsonl" + manager = HistoryManager(history_file) + + manager.add("alpha") + manager.add("beta") + + assert manager.get_previous(current_input="typed") == "beta" + assert manager.get_previous(current_input="typed") == "alpha" + assert manager.get_next() == "beta" + assert manager.get_next() == "typed" + assert manager.get_next() is None + + +def test_history_manager_prefix_filtering(tmp_path: Path) -> None: + history_file = tmp_path / "history.jsonl" + manager = HistoryManager(history_file) + + manager.add("foo") + manager.add("bar") + manager.add("fizz") + + assert manager.get_previous(current_input="", prefix="f") == "fizz" + assert manager.get_previous(current_input="", prefix="f") == "foo" + assert manager.get_previous(current_input="", prefix="f") is None diff --git a/tests/test_system_prompt.py b/tests/test_system_prompt.py new file mode 100644 index 0000000..d5a1165 --- /dev/null +++ b/tests/test_system_prompt.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +import sys + +import pytest + +from vibe.core.config import VibeConfig +from 
vibe.core.system_prompt import get_universal_system_prompt +from vibe.core.tools.manager import ToolManager + + +def test_get_universal_system_prompt_includes_windows_prompt_on_windows( + monkeypatch: pytest.MonkeyPatch, +) -> None: + monkeypatch.setattr(sys, "platform", "win32") + monkeypatch.setenv("COMSPEC", "C:\\Windows\\System32\\cmd.exe") + + config = VibeConfig( + system_prompt_id="tests", + include_project_context=False, + include_prompt_detail=True, + include_model_info=False, + ) + tool_manager = ToolManager(config) + + prompt = get_universal_system_prompt(tool_manager, config) + + assert "You are Vibe, a super useful programming assistant." in prompt + assert ( + "The operating system is Windows with shell `C:\\Windows\\System32\\cmd.exe`" + in prompt + ) + assert "DO NOT use Unix commands like `ls`, `grep`, `cat`" in prompt + assert "Use: `dir` (Windows) for directory listings" in prompt + assert "Use: backslashes (\\\\) for paths" in prompt + assert "Check command availability with: `where command` (Windows)" in prompt + assert "Script shebang: Not applicable on Windows" in prompt diff --git a/tests/test_tagged_text.py b/tests/test_tagged_text.py new file mode 100644 index 0000000..0fac531 --- /dev/null +++ b/tests/test_tagged_text.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import pytest + +from vibe.core.utils import CANCELLATION_TAG, KNOWN_TAGS, TaggedText + + +def test_tagged_text_creation_without_tag() -> None: + tagged = TaggedText("Hello world") + assert tagged.message == "Hello world" + assert tagged.tag == "" + assert str(tagged) == "Hello world" + + +def test_tagged_text_creation_with_tag() -> None: + tagged = TaggedText("User cancelled", CANCELLATION_TAG) + assert tagged.message == "User cancelled" + assert tagged.tag == CANCELLATION_TAG + assert str(tagged) == f"<{CANCELLATION_TAG}>User cancelled" + + +@pytest.mark.parametrize("tag", KNOWN_TAGS) +def test_tagged_text_from_string_with_known_tag(tag: str) -> None: + text = 
f"<{tag}>This is a tagged text" + tagged = TaggedText.from_string(text) + assert tagged.message == "This is a tagged text" + assert tagged.tag == tag + + +@pytest.mark.parametrize("tag", KNOWN_TAGS) +def test_tagged_text_from_string_with_known_tag_multiline(tag: str) -> None: + text = f"<{tag}>This is a tagged text" + tagged = TaggedText.from_string(text) + assert tagged.message == "This is a tagged text" + assert tagged.tag == tag + + +@pytest.mark.parametrize("tag", KNOWN_TAGS) +def test_tagged_text_from_string_with_known_tag_whitespace(tag: str) -> None: + text = f"<{tag}> This is a tagged text " + tagged = TaggedText.from_string(text) + assert tagged.message == " This is a tagged text " + assert tagged.tag == tag + + +def test_tagged_text_from_string_with_unknown_tag() -> None: + text = "Some content" + tagged = TaggedText.from_string(text) + assert tagged.message == "Some content" + assert tagged.tag == "" + + +def test_tagged_text_from_string_with_text_before_tag() -> None: + text = f"Prefix text <{CANCELLATION_TAG}>Content" + tagged = TaggedText.from_string(text) + assert tagged.message == "Prefix text Content" + assert tagged.tag == CANCELLATION_TAG + + +def test_tagged_text_from_string_with_text_after_tag() -> None: + text = f"<{CANCELLATION_TAG}>Content Suffix text" + tagged = TaggedText.from_string(text) + assert tagged.message == "Content Suffix text" + assert tagged.tag == CANCELLATION_TAG + + +def test_tagged_text_from_string_with_text_before_and_after_tag() -> None: + text = f"Before <{CANCELLATION_TAG}>Content After" + tagged = TaggedText.from_string(text) + assert tagged.message == "Before Content After" + assert tagged.tag == CANCELLATION_TAG + + +def test_tagged_text_from_string_without_tags() -> None: + text = "Just plain text without any tags" + tagged = TaggedText.from_string(text) + assert tagged.message == "Just plain text without any tags" + assert tagged.tag == "" + + +def test_tagged_text_from_string_empty() -> None: + tagged = 
TaggedText.from_string("") + assert tagged.message == "" + assert tagged.tag == "" + + +def test_tagged_text_from_string_mismatched_tags() -> None: + text = f"<{CANCELLATION_TAG}>Content" + tagged = TaggedText.from_string(text) + assert tagged.message == f"<{CANCELLATION_TAG}>Content" + assert tagged.tag == "" + + +def test_tagged_text_round_trip() -> None: + original = TaggedText("User cancelled", CANCELLATION_TAG) + text = str(original) + parsed = TaggedText.from_string(text) + assert parsed.message == original.message + assert parsed.tag == original.tag + + +def test_tagged_text_round_trip_no_tag() -> None: + original = TaggedText("Plain message") + text = str(original) + parsed = TaggedText.from_string(text) + assert parsed.message == original.message + assert parsed.tag == original.tag diff --git a/tests/test_ui_input_history.py b/tests/test_ui_input_history.py new file mode 100644 index 0000000..854ba23 --- /dev/null +++ b/tests/test_ui_input_history.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from vibe.cli.history_manager import HistoryManager +from vibe.cli.textual_ui.app import VibeApp +from vibe.cli.textual_ui.widgets.chat_input.body import ChatInputBody +from vibe.cli.textual_ui.widgets.chat_input.container import ChatInputContainer +from vibe.core.config import SessionLoggingConfig, VibeConfig + + +@pytest.fixture +def vibe_config() -> VibeConfig: + return VibeConfig(session_logging=SessionLoggingConfig(enabled=False)) + + +@pytest.fixture +def vibe_app(vibe_config: VibeConfig, tmp_path: Path) -> VibeApp: + return VibeApp(config=vibe_config) + + +@pytest.fixture +def history_file(tmp_path: Path) -> Path: + history_file = tmp_path / "history.jsonl" + history_entries = ["hello", "hi there", "how are you?"] + history_file.write_text( + "\n".join(json.dumps(entry) for entry in history_entries) + "\n", + encoding="utf-8", + ) + return history_file + + +def inject_history_file(vibe_app: 
VibeApp, history_file: Path) -> None: + # Dependency Injection would help here, but as we don't have it yet: manual injection + chat_input_body = vibe_app.query_one(ChatInputBody) + chat_input_body.history = HistoryManager(history_file) + + +@pytest.mark.asyncio +async def test_ui_navigation_through_input_history( + vibe_app: VibeApp, history_file: Path +) -> None: + async with vibe_app.run_test() as pilot: + inject_history_file(vibe_app, history_file) + chat_input = vibe_app.query_one(ChatInputContainer) + + await pilot.press("up") + assert chat_input.value == "how are you?" + await pilot.press("up") + assert chat_input.value == "hi there" + await pilot.press("up") + assert chat_input.value == "hello" + await pilot.press("up") + # cannot go further up + assert chat_input.value == "hello" + await pilot.press("down") + assert chat_input.value == "hi there" + await pilot.press("down") + assert chat_input.value == "how are you?" + await pilot.press("down") + assert chat_input.value == "" + + +@pytest.mark.asyncio +async def test_ui_does_nothing_if_command_completion_is_active( + vibe_app: VibeApp, history_file: Path +) -> None: + async with vibe_app.run_test() as pilot: + inject_history_file(vibe_app, history_file) + chat_input = vibe_app.query_one(ChatInputContainer) + + await pilot.press("/") + assert chat_input.value == "/" + await pilot.press("up") + assert chat_input.value == "/" + await pilot.press("down") + assert chat_input.value == "/" + + +@pytest.mark.asyncio +async def test_ui_does_not_prevent_arrow_down_to_move_cursor_to_bottom_lines( + vibe_app: VibeApp, +): + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + textarea = chat_input.input_widget + assert textarea is not None + + await pilot.press(*"test") + await pilot.press("ctrl+j", "ctrl+j") + assert chat_input.value == "test\n\n" + assert textarea.text.count("\n") == 2 + initial_row = textarea.cursor_location[0] + assert initial_row == 2, f"Expected cursor 
on line 2, got line {initial_row}" + await pilot.press("up") + assert textarea.cursor_location[0] == 1, "First arrow up should move to line 1" + await pilot.press("up") + assert textarea.cursor_location[0] == 0, ( + "Second arrow up should move to line 0 (first line)" + ) + await pilot.press("down") + final_row = textarea.cursor_location[0] + assert final_row == 1, f"cursor is still on line {final_row}." + + +@pytest.mark.asyncio +async def test_ui_resumes_arrow_down_after_manual_move( + vibe_app: VibeApp, tmp_path: Path +) -> None: + history_path = tmp_path / "history.jsonl" + history_path.write_text( + json.dumps("first line\nsecond line") + "\n", encoding="utf-8" + ) + + async with vibe_app.run_test() as pilot: + inject_history_file(vibe_app, history_path) + chat_input = vibe_app.query_one(ChatInputContainer) + textarea = chat_input.input_widget + assert textarea is not None + + await pilot.press("up") + assert chat_input.value == "first line\nsecond line" + assert textarea.cursor_location == (0, len("first line")) + await pilot.press("left") + await pilot.press("down") + assert textarea.cursor_location[0] == 1 + assert chat_input.value == "first line\nsecond line" diff --git a/tests/test_ui_pending_user_message.py b/tests/test_ui_pending_user_message.py new file mode 100644 index 0000000..dcf0a02 --- /dev/null +++ b/tests/test_ui_pending_user_message.py @@ -0,0 +1,161 @@ +from __future__ import annotations + +import asyncio +from collections.abc import AsyncGenerator, Callable +import time +from types import SimpleNamespace + +import pytest + +from vibe.cli.textual_ui.app import VibeApp +from vibe.cli.textual_ui.widgets.chat_input.container import ChatInputContainer +from vibe.cli.textual_ui.widgets.messages import InterruptMessage, UserMessage +from vibe.core.agent import Agent +from vibe.core.config import SessionLoggingConfig, VibeConfig +from vibe.core.types import BaseEvent + + +async def _wait_for( + pilot, condition: Callable[[], object | None], timeout: 
float = 3.0 +) -> object | None: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + result = condition() + if result: + return result + await pilot.pause(0.05) + return None + + +class StubAgent(Agent): + def __init__(self) -> None: + self.messages: list = [] + self.stats = SimpleNamespace(context_tokens=0) + self.approval_callback = None + + async def initialize(self) -> None: + return + + async def act(self, msg: str) -> AsyncGenerator[BaseEvent]: + if False: + yield msg + + +@pytest.fixture +def vibe_config() -> VibeConfig: + return VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), enable_update_checks=False + ) + + +@pytest.fixture +def vibe_app(vibe_config: VibeConfig) -> VibeApp: + return VibeApp(config=vibe_config) + + +def _patch_delayed_init( + monkeypatch: pytest.MonkeyPatch, init_event: asyncio.Event +) -> None: + async def _fake_initialize(self: VibeApp) -> None: + if self.agent or self._agent_initializing: + return + + self._agent_initializing = True + try: + await init_event.wait() + self.agent = StubAgent() + except asyncio.CancelledError: + self.agent = None + return + finally: + self._agent_initializing = False + self._agent_init_task = None + + monkeypatch.setattr(VibeApp, "_initialize_agent", _fake_initialize, raising=True) + + +@pytest.mark.asyncio +async def test_shows_user_message_as_pending_until_agent_is_initialized( + vibe_app: VibeApp, monkeypatch: pytest.MonkeyPatch +) -> None: + init_event = asyncio.Event() + _patch_delayed_init(monkeypatch, init_event) + + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "Hello" + + press_task = asyncio.create_task(pilot.press("enter")) + + user_message = await _wait_for( + pilot, lambda: next(iter(vibe_app.query(UserMessage)), None) + ) + assert isinstance(user_message, UserMessage) + assert user_message.has_class("pending") + init_event.set() + await press_task + assert not 
user_message.has_class("pending") + + +@pytest.mark.asyncio +async def test_can_interrupt_pending_message_during_initialization( + vibe_app: VibeApp, monkeypatch: pytest.MonkeyPatch +) -> None: + init_event = asyncio.Event() + _patch_delayed_init(monkeypatch, init_event) + + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "Hello" + + press_task = asyncio.create_task(pilot.press("enter")) + + user_message = await _wait_for( + pilot, lambda: next(iter(vibe_app.query(UserMessage)), None) + ) + assert isinstance(user_message, UserMessage) + assert user_message.has_class("pending") + + await pilot.press("escape") + await press_task + assert not user_message.has_class("pending") + assert vibe_app.query(InterruptMessage) + assert vibe_app.agent is None + + +@pytest.mark.asyncio +async def test_retry_initialization_after_interrupt( + vibe_app: VibeApp, monkeypatch: pytest.MonkeyPatch +) -> None: + init_event = asyncio.Event() + _patch_delayed_init(monkeypatch, init_event) + + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "First Message" + press_task = asyncio.create_task(pilot.press("enter")) + + await _wait_for(pilot, lambda: next(iter(vibe_app.query(UserMessage)), None)) + await pilot.press("escape") + await press_task + assert vibe_app.agent is None + assert vibe_app._agent_init_task is None + + chat_input.value = "Second Message" + press_task_2 = asyncio.create_task(pilot.press("enter")) + + def get_second_message(): + messages = list(vibe_app.query(UserMessage)) + if len(messages) >= 2: + return messages[-1] + return None + + user_message_2 = await _wait_for(pilot, get_second_message) + assert isinstance(user_message_2, UserMessage) + assert user_message_2.has_class("pending") + assert vibe_app.agent is None + + init_event.set() + await press_task_2 + assert not user_message_2.has_class("pending") + assert vibe_app.agent is not None 
diff --git a/tests/tools/test_bash.py b/tests/tools/test_bash.py new file mode 100644 index 0000000..2ba04da --- /dev/null +++ b/tests/tools/test_bash.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import pytest + +from vibe.core.tools.base import BaseToolState, ToolError, ToolPermission +from vibe.core.tools.builtins.bash import Bash, BashArgs, BashToolConfig + + +@pytest.fixture +def bash(tmp_path): + config = BashToolConfig(workdir=tmp_path) + return Bash(config=config, state=BaseToolState()) + + +@pytest.mark.asyncio +async def test_runs_echo_successfully(bash): + result = await bash.run(BashArgs(command="echo hello")) + + assert result.returncode == 0 + assert result.stdout == "hello\n" + assert result.stderr == "" + + +@pytest.mark.asyncio +async def test_fails_cat_command_with_missing_file(bash): + with pytest.raises(ToolError) as err: + await bash.run(BashArgs(command="cat missing_file.txt")) + + message = str(err.value) + assert "Command failed" in message + assert "Return code: 1" in message + assert "No such file or directory" in message + + +@pytest.mark.asyncio +async def test_uses_effective_workdir(tmp_path): + config = BashToolConfig(workdir=tmp_path) + bash_tool = Bash(config=config, state=BaseToolState()) + + result = await bash_tool.run(BashArgs(command="pwd")) + + assert result.stdout.strip() == str(tmp_path) + + +@pytest.mark.asyncio +async def test_handles_timeout(bash): + with pytest.raises(ToolError) as err: + await bash.run(BashArgs(command="sleep 2", timeout=1)) + + assert "Command timed out after 1s" in str(err.value) + + +@pytest.mark.asyncio +async def test_truncates_output_to_max_bytes(bash): + config = BashToolConfig(workdir=None, max_output_bytes=5) + bash_tool = Bash(config=config, state=BaseToolState()) + + result = await bash_tool.run(BashArgs(command="printf 'abcdefghij'")) + + assert result.stdout == "abcde" + assert result.stderr == "" + assert result.returncode == 0 + + +@pytest.mark.asyncio +async def 
test_decodes_non_utf8_bytes(bash): + result = await bash.run(BashArgs(command="printf '\\xff\\xfe'")) + + # accept both possible encodings, as some shells emit escaped bytes as literal strings + assert result.stdout in {"��", "\xff\xfe", r"\xff\xfe"} + assert result.stderr == "" + + +def test_check_allowlist_denylist(): + config = BashToolConfig(allowlist=["echo", "pwd"], denylist=["rm"]) + bash_tool = Bash(config=config, state=BaseToolState()) + + allowlisted = bash_tool.check_allowlist_denylist(BashArgs(command="echo hi")) + denylisted = bash_tool.check_allowlist_denylist(BashArgs(command="rm -rf /tmp")) + mixed = bash_tool.check_allowlist_denylist(BashArgs(command="pwd && whoami")) + empty = bash_tool.check_allowlist_denylist(BashArgs(command="")) + + assert allowlisted is ToolPermission.ALWAYS + assert denylisted is ToolPermission.NEVER + assert mixed is None + assert empty is None diff --git a/tests/tools/test_grep.py b/tests/tools/test_grep.py new file mode 100644 index 0000000..6b8738a --- /dev/null +++ b/tests/tools/test_grep.py @@ -0,0 +1,347 @@ +from __future__ import annotations + +import shutil + +import pytest + +from vibe.core.tools.base import ToolError +from vibe.core.tools.builtins.grep import ( + Grep, + GrepArgs, + GrepBackend, + GrepState, + GrepToolConfig, +) + + +@pytest.fixture +def grep(tmp_path): + config = GrepToolConfig(workdir=tmp_path) + return Grep(config=config, state=GrepState()) + + +@pytest.fixture +def grep_gnu_only(tmp_path, monkeypatch): + original_which = shutil.which + + def mock_which(cmd): + if cmd == "rg": + return None + return original_which(cmd) + + monkeypatch.setattr("shutil.which", mock_which) + config = GrepToolConfig(workdir=tmp_path) + return Grep(config=config, state=GrepState()) + + +def test_detects_ripgrep_when_available(grep): + if shutil.which("rg"): + assert grep._detect_backend() == GrepBackend.RIPGREP + + +def test_falls_back_to_gnu_grep(grep, monkeypatch): + original_which = shutil.which + + def 
mock_which(cmd): + if cmd == "rg": + return None + return original_which(cmd) + + monkeypatch.setattr("shutil.which", mock_which) + + if shutil.which("grep"): + assert grep._detect_backend() == GrepBackend.GNU_GREP + + +def test_raises_error_if_no_grep_available(grep, monkeypatch): + monkeypatch.setattr("shutil.which", lambda cmd: None) + + with pytest.raises(ToolError) as err: + grep._detect_backend() + + assert "Neither ripgrep (rg) nor grep is installed" in str(err.value) + + +@pytest.mark.asyncio +async def test_finds_pattern_in_file(grep, tmp_path): + (tmp_path / "test.py").write_text("def hello():\n print('world')\n") + + result = await grep.run(GrepArgs(pattern="hello")) + + assert result.match_count == 1 + assert "hello" in result.matches + assert "test.py" in result.matches + assert not result.was_truncated + + +@pytest.mark.asyncio +async def test_finds_multiple_matches(grep, tmp_path): + (tmp_path / "test.py").write_text("foo\nbar\nfoo\nbaz\nfoo\n") + + result = await grep.run(GrepArgs(pattern="foo")) + + assert result.match_count == 3 + assert result.matches.count("foo") == 3 + assert not result.was_truncated + + +@pytest.mark.asyncio +async def test_returns_empty_on_no_matches(grep, tmp_path): + (tmp_path / "test.py").write_text("def hello():\n pass\n") + + result = await grep.run(GrepArgs(pattern="nonexistent")) + + assert result.match_count == 0 + assert result.matches == "" + assert not result.was_truncated + + +@pytest.mark.asyncio +async def test_fails_with_empty_pattern(grep): + with pytest.raises(ToolError) as err: + await grep.run(GrepArgs(pattern="")) + + assert "Empty search pattern" in str(err.value) + + +@pytest.mark.asyncio +async def test_fails_with_nonexistent_path(grep): + with pytest.raises(ToolError) as err: + await grep.run(GrepArgs(pattern="test", path="nonexistent")) + + assert "Path does not exist" in str(err.value) + + +@pytest.mark.asyncio +async def test_searches_in_specific_path(grep, tmp_path): + subdir = tmp_path / "subdir" 
+ subdir.mkdir() + (subdir / "test.py").write_text("match here\n") + (tmp_path / "other.py").write_text("match here too\n") + + result = await grep.run(GrepArgs(pattern="match", path="subdir")) + + assert result.match_count == 1 + assert "subdir" in result.matches and "test.py" in result.matches + assert "other.py" not in result.matches + + +@pytest.mark.asyncio +async def test_truncates_to_max_matches(grep, tmp_path): + (tmp_path / "test.py").write_text("\n".join(f"line {i}" for i in range(200))) + + result = await grep.run(GrepArgs(pattern="line", max_matches=50)) + + assert result.match_count == 50 + assert result.was_truncated + + +@pytest.mark.asyncio +async def test_truncates_to_max_output_bytes(grep, tmp_path): + config = GrepToolConfig(workdir=tmp_path, max_output_bytes=100) + grep_tool = Grep(config=config, state=GrepState()) + (tmp_path / "test.py").write_text("\n".join("x" * 100 for _ in range(10))) + + result = await grep_tool.run(GrepArgs(pattern="x")) + + assert len(result.matches) <= 100 + assert result.was_truncated + + +@pytest.mark.asyncio +async def test_respects_default_ignore_patterns(grep, tmp_path): + (tmp_path / "included.py").write_text("match\n") + node_modules = tmp_path / "node_modules" + node_modules.mkdir() + (node_modules / "excluded.js").write_text("match\n") + + result = await grep.run(GrepArgs(pattern="match")) + + assert "included.py" in result.matches + assert "excluded.js" not in result.matches + + +@pytest.mark.asyncio +async def test_respects_vibeignore_file(grep, tmp_path): + (tmp_path / ".vibeignore").write_text("custom_dir/\n*.tmp\n") + custom_dir = tmp_path / "custom_dir" + custom_dir.mkdir() + (custom_dir / "excluded.py").write_text("match\n") + (tmp_path / "excluded.tmp").write_text("match\n") + (tmp_path / "included.py").write_text("match\n") + + result = await grep.run(GrepArgs(pattern="match")) + + assert "included.py" in result.matches + assert "excluded.py" not in result.matches + assert "excluded.tmp" not in 
result.matches + + +@pytest.mark.asyncio +async def test_ignores_comments_in_vibeignore(grep, tmp_path): + (tmp_path / ".vibeignore").write_text("# comment\npattern/\n# another comment\n") + (tmp_path / "file.py").write_text("match\n") + + result = await grep.run(GrepArgs(pattern="match")) + + assert result.match_count >= 1 + + +@pytest.mark.asyncio +async def test_tracks_search_history(grep, tmp_path): + (tmp_path / "test.py").write_text("content\n") + + await grep.run(GrepArgs(pattern="first")) + await grep.run(GrepArgs(pattern="second")) + await grep.run(GrepArgs(pattern="third")) + + assert grep.state.search_history == ["first", "second", "third"] + + +@pytest.mark.asyncio +async def test_uses_effective_workdir(tmp_path): + config = GrepToolConfig(workdir=tmp_path) + grep_tool = Grep(config=config, state=GrepState()) + (tmp_path / "test.py").write_text("match\n") + + result = await grep_tool.run(GrepArgs(pattern="match", path=".")) + + assert result.match_count == 1 + assert "test.py" in result.matches + + +@pytest.mark.skipif(not shutil.which("grep"), reason="GNU grep not available") +class TestGnuGrepBackend: + @pytest.mark.asyncio + async def test_finds_pattern_in_file(self, grep_gnu_only, tmp_path): + (tmp_path / "test.py").write_text("def hello():\n print('world')\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="hello")) + + assert result.match_count == 1 + assert "hello" in result.matches + assert "test.py" in result.matches + + @pytest.mark.asyncio + async def test_finds_multiple_matches(self, grep_gnu_only, tmp_path): + (tmp_path / "test.py").write_text("foo\nbar\nfoo\nbaz\nfoo\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="foo")) + + assert result.match_count == 3 + assert result.matches.count("foo") == 3 + + @pytest.mark.asyncio + async def test_returns_empty_on_no_matches(self, grep_gnu_only, tmp_path): + (tmp_path / "test.py").write_text("def hello():\n pass\n") + + result = await 
grep_gnu_only.run(GrepArgs(pattern="nonexistent")) + + assert result.match_count == 0 + assert result.matches == "" + + @pytest.mark.asyncio + async def test_case_insensitive_for_lowercase_pattern( + self, grep_gnu_only, tmp_path + ): + (tmp_path / "test.py").write_text("Hello\nHELLO\nhello\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="hello")) + + assert result.match_count == 3 + + @pytest.mark.asyncio + async def test_case_sensitive_for_mixed_case_pattern(self, grep_gnu_only, tmp_path): + (tmp_path / "test.py").write_text("Hello\nHELLO\nhello\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="Hello")) + + assert result.match_count == 1 + + @pytest.mark.asyncio + async def test_respects_exclude_patterns(self, grep_gnu_only, tmp_path): + (tmp_path / "included.py").write_text("match\n") + node_modules = tmp_path / "node_modules" + node_modules.mkdir() + (node_modules / "excluded.js").write_text("match\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="match")) + + assert "included.py" in result.matches + assert "excluded.js" not in result.matches + + @pytest.mark.asyncio + async def test_searches_in_specific_path(self, grep_gnu_only, tmp_path): + subdir = tmp_path / "subdir" + subdir.mkdir() + (subdir / "test.py").write_text("match here\n") + (tmp_path / "other.py").write_text("match here too\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="match", path="subdir")) + + assert result.match_count == 1 + assert "other.py" not in result.matches + + @pytest.mark.asyncio + async def test_respects_vibeignore_file(self, grep_gnu_only, tmp_path): + (tmp_path / ".vibeignore").write_text("custom_dir/\n*.tmp\n") + custom_dir = tmp_path / "custom_dir" + custom_dir.mkdir() + (custom_dir / "excluded.py").write_text("match\n") + (tmp_path / "excluded.tmp").write_text("match\n") + (tmp_path / "included.py").write_text("match\n") + + result = await grep_gnu_only.run(GrepArgs(pattern="match")) + + assert "included.py" in result.matches + assert 
"excluded.py" not in result.matches + assert "excluded.tmp" not in result.matches + + @pytest.mark.asyncio + async def test_truncates_to_max_matches(self, grep_gnu_only, tmp_path): + (tmp_path / "test.py").write_text("\n".join(f"line {i}" for i in range(200))) + + result = await grep_gnu_only.run(GrepArgs(pattern="line", max_matches=50)) + + assert result.match_count == 50 + assert result.was_truncated + + +@pytest.mark.skipif(not shutil.which("rg"), reason="ripgrep not available") +class TestRipgrepBackend: + @pytest.mark.asyncio + async def test_smart_case_lowercase_pattern(self, grep, tmp_path): + (tmp_path / "test.py").write_text("Hello\nHELLO\nhello\n") + + result = await grep.run(GrepArgs(pattern="hello")) + + assert result.match_count == 3 + + @pytest.mark.asyncio + async def test_smart_case_mixed_case_pattern(self, grep, tmp_path): + (tmp_path / "test.py").write_text("Hello\nHELLO\nhello\n") + + result = await grep.run(GrepArgs(pattern="Hello")) + + assert result.match_count == 1 + + @pytest.mark.asyncio + async def test_searches_ignored_files_when_use_default_ignore_false( + self, grep, tmp_path + ): + (tmp_path / ".ignore").write_text("ignored_by_rg/\n") + + ignored_dir = tmp_path / "ignored_by_rg" + ignored_dir.mkdir() + (ignored_dir / "file.py").write_text("match\n") + (tmp_path / "included.py").write_text("match\n") + + result_with_ignore = await grep.run(GrepArgs(pattern="match")) + assert "included.py" in result_with_ignore.matches + assert "ignored_by_rg" not in result_with_ignore.matches + + result_without_ignore = await grep.run( + GrepArgs(pattern="match", use_default_ignore=False) + ) + assert "included.py" in result_without_ignore.matches + assert "ignored_by_rg/file.py" in result_without_ignore.matches diff --git a/tests/tools/test_ui_bash_execution.py b/tests/tools/test_ui_bash_execution.py new file mode 100644 index 0000000..ba564ba --- /dev/null +++ b/tests/tools/test_ui_bash_execution.py @@ -0,0 +1,138 @@ +from __future__ import 
annotations + +from pathlib import Path +import time + +import pytest +from textual.widgets import Static + +from vibe.cli.textual_ui.app import VibeApp +from vibe.cli.textual_ui.widgets.chat_input.container import ChatInputContainer +from vibe.cli.textual_ui.widgets.messages import BashOutputMessage, ErrorMessage +from vibe.core.config import SessionLoggingConfig, VibeConfig + + +@pytest.fixture +def vibe_config(tmp_path: Path) -> VibeConfig: + return VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), workdir=tmp_path + ) + + +@pytest.fixture +def vibe_app(vibe_config: VibeConfig) -> VibeApp: + return VibeApp(config=vibe_config) + + +async def _wait_for_bash_output_message( + vibe_app: VibeApp, pilot, timeout: float = 1.0 +) -> BashOutputMessage: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + if message := next(iter(vibe_app.query(BashOutputMessage)), None): + return message + await pilot.pause(0.05) + raise TimeoutError(f"BashOutputMessage did not appear within {timeout}s") + + +def assert_no_command_error(vibe_app: VibeApp) -> None: + errors = list(vibe_app.query(ErrorMessage)) + if not errors: + return + + disallowed = { + "Command failed", + "Command timed out", + "No command provided after '!'", + } + offending = [ + getattr(err, "_error", "") + for err in errors + if getattr(err, "_error", "") + and any(phrase in getattr(err, "_error", "") for phrase in disallowed) + ] + assert not offending, f"Unexpected command errors: {offending}" + + +@pytest.mark.asyncio +async def test_ui_reports_no_output(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "!true" + + await pilot.press("enter") + message = await _wait_for_bash_output_message(vibe_app, pilot) + output_widget = message.query_one(".bash-output", Static) + assert str(output_widget.render()) == "(no output)" + assert_no_command_error(vibe_app) + + 
+@pytest.mark.asyncio +async def test_ui_shows_success_in_case_of_zero_code(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "!true" + + await pilot.press("enter") + message = await _wait_for_bash_output_message(vibe_app, pilot) + icon = message.query_one(".bash-exit-success", Static) + assert str(icon.render()) == "✓" + assert not list(message.query(".bash-exit-failure")) + + +@pytest.mark.asyncio +async def test_ui_shows_failure_in_case_of_non_zero_code(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "!bash -lc 'exit 7'" + + await pilot.press("enter") + message = await _wait_for_bash_output_message(vibe_app, pilot) + icon = message.query_one(".bash-exit-failure", Static) + assert str(icon.render()) == "✗" + code = message.query_one(".bash-exit-code", Static) + assert "7" in str(code.render()) + assert not list(message.query(".bash-exit-success")) + + +@pytest.mark.asyncio +async def test_ui_handles_non_utf8_output(vibe_app: VibeApp) -> None: + """Assert the UI accepts decoding a non-UTF8 sequence like `printf '\xf0\x9f\x98'`. + Whereas `printf '\xf0\x9f\x98\x8b'` prints a smiley face (😋) and would work even without those changes. 
+ """ + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "!printf '\\xff\\xfe'" + + await pilot.press("enter") + message = await _wait_for_bash_output_message(vibe_app, pilot) + output_widget = message.query_one(".bash-output", Static) + # accept both possible encodings, as some shells emit escaped bytes as literal strings + assert str(output_widget.render()) in {"��", "\xff\xfe", r"\xff\xfe"} + assert_no_command_error(vibe_app) + + +@pytest.mark.asyncio +async def test_ui_handles_utf8_output(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "!echo hello" + + await pilot.press("enter") + message = await _wait_for_bash_output_message(vibe_app, pilot) + output_widget = message.query_one(".bash-output", Static) + assert str(output_widget.render()) == "hello\n" + assert_no_command_error(vibe_app) + + +@pytest.mark.asyncio +async def test_ui_handles_non_utf8_stderr(vibe_app: VibeApp) -> None: + async with vibe_app.run_test() as pilot: + chat_input = vibe_app.query_one(ChatInputContainer) + chat_input.value = "!bash -lc \"printf '\\\\xff\\\\xfe' 1>&2\"" + + await pilot.press("enter") + message = await _wait_for_bash_output_message(vibe_app, pilot) + output_widget = message.query_one(".bash-output", Static) + assert str(output_widget.render()) == "��" + assert_no_command_error(vibe_app) diff --git a/tests/update_notifier/test_github_version_update_gateway.py b/tests/update_notifier/test_github_version_update_gateway.py new file mode 100644 index 0000000..df3861c --- /dev/null +++ b/tests/update_notifier/test_github_version_update_gateway.py @@ -0,0 +1,249 @@ +from __future__ import annotations + +from collections.abc import Callable + +import httpx +import pytest + +from vibe.cli.update_notifier.github_version_update_gateway import ( + GitHubVersionUpdateGateway, +) +from 
vibe.cli.update_notifier.version_update_gateway import ( + VersionUpdateGatewayCause, + VersionUpdateGatewayError, +) + +Handler = Callable[[httpx.Request], httpx.Response] + +GITHUB_API_URL = "https://api.github.com" + + +def _raise_connect_timeout(request: httpx.Request) -> httpx.Response: + raise httpx.ConnectTimeout("boom", request=request) + + +@pytest.mark.asyncio +async def test_retrieves_latest_version_when_available() -> None: + def handler(request: httpx.Request) -> httpx.Response: + assert request.headers.get("Authorization") == "Bearer token" + return httpx.Response( + status_code=httpx.codes.OK, + json=[{"tag_name": "v1.2.3", "prerelease": False, "draft": False}], + ) + + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway( + "owner", "repo", token="token", client=client + ) + update = await notifier.fetch_update() + + assert update is not None + assert update.latest_version == "1.2.3" + + +@pytest.mark.asyncio +async def test_strips_uppercase_prefix_from_tag_name() -> None: + def handler(request: httpx.Request) -> httpx.Response: + return httpx.Response( + status_code=httpx.codes.OK, + json=[{"tag_name": "V0.9.0", "prerelease": False, "draft": False}], + ) + + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway("owner", "repo", client=client) + update = await notifier.fetch_update() + + assert update is not None + assert update.latest_version == "0.9.0" + + +@pytest.mark.asyncio +async def test_considers_no_update_available_when_no_releases_are_found() -> None: + """If the repository cannot be accessed (e.g. invalid token), the response will be 404. + But using API 'releases/latest', if no release has been created, the response will ALSO be 404. 
+ + This test ensures that we consider no update available when no releases are found. + (And this is why we are using "releases" with a per_page=1 parameter, instead of "releases/latest") + """ + + def handler(request: httpx.Request) -> httpx.Response: + return httpx.Response(status_code=httpx.codes.OK, json=[]) + + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway("owner", "repo", client=client) + update = await notifier.fetch_update() + + assert update is None + + +@pytest.mark.asyncio +async def test_considers_no_update_available_when_only_drafts_and_prereleases_are_found() -> ( + None +): + def handler(request: httpx.Request) -> httpx.Response: + return httpx.Response( + status_code=httpx.codes.OK, + json=[ + {"tag_name": "v2.0.0-beta", "prerelease": True, "draft": False}, + {"tag_name": "v2.0.0", "prerelease": False, "draft": True}, + ], + ) + + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway("owner", "repo", client=client) + update = await notifier.fetch_update() + + assert update is None + + +@pytest.mark.asyncio +async def test_picks_the_most_recently_published_non_prerelease_and_non_draft() -> None: + def handler(request: httpx.Request) -> httpx.Response: + return httpx.Response( + status_code=httpx.codes.OK, + json=[ + { + "tag_name": "v2.0.0-beta", + "prerelease": True, + "draft": False, + "published_at": "2025-10-25T12:00:00Z", + }, + { + "tag_name": "v2.0.0", + "prerelease": False, + "draft": True, + "published_at": "2025-10-26T12:00:00Z", + }, + { + "tag_name": "v1.12.455", + "prerelease": False, + "draft": False, + "published_at": "2025-11-02T12:00:00Z", + }, + { + "tag_name": "1.12.400", + "prerelease": False, + "draft": False, + "published_at": "2025-11-10T12:00:00Z", + }, + { + "tag_name": 
"1.12.300", + "prerelease": False, + "draft": False, + "published_at": "2025-11-11T112:00:00Z", + }, + ], + ) + + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway("owner", "repo", client=client) + update = await notifier.fetch_update() + + assert update is not None + assert update.latest_version == "1.12.300" + + +@pytest.mark.parametrize( + "payload", + [ + [{"tag_name": "v2.0.0-beta", "prerelease": True, "draft": False}], + [{"tag_name": "v2.0.0", "prerelease": False, "draft": True}], + ], +) +@pytest.mark.asyncio +async def test_ignores_draft_releases_and_prereleases( + payload: dict[str, object], +) -> None: + def handler(request: httpx.Request) -> httpx.Response: + return httpx.Response(status_code=httpx.codes.OK, json=payload) + + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway("owner", "repo", client=client) + update = await notifier.fetch_update() + + assert update is None + + +@pytest.mark.parametrize( + ("handler", "expected_cause", "expected_custom_message"), + [ + ( + lambda _: httpx.Response(status_code=httpx.codes.NOT_FOUND), + VersionUpdateGatewayCause.NOT_FOUND, + "Unable to fetch the GitHub releases. 
Did you export a GITHUB_TOKEN environment variable?", + ), + ( + lambda _: httpx.Response( + status_code=httpx.codes.FORBIDDEN, + headers={"X-RateLimit-Remaining": "0"}, + ), + VersionUpdateGatewayCause.TOO_MANY_REQUESTS, + None, + ), + ( + lambda _: httpx.Response(status_code=httpx.codes.TOO_MANY_REQUESTS), + VersionUpdateGatewayCause.TOO_MANY_REQUESTS, + None, + ), + ( + lambda _: httpx.Response(status_code=httpx.codes.FORBIDDEN), + VersionUpdateGatewayCause.FORBIDDEN, + None, + ), + ( + lambda _: httpx.Response(status_code=httpx.codes.INTERNAL_SERVER_ERROR), + VersionUpdateGatewayCause.ERROR_RESPONSE, + None, + ), + ( + lambda _: httpx.Response(status_code=httpx.codes.OK, text="not json"), + VersionUpdateGatewayCause.INVALID_RESPONSE, + None, + ), + (_raise_connect_timeout, VersionUpdateGatewayCause.REQUEST_FAILED, None), + ], + ids=[ + "not_found", + "rate_limit_header", + "rate_limit_status", + "forbidden", + "error_response", + "invalid_json", + "request_error", + ], +) +@pytest.mark.asyncio +async def test_retrieves_nothing_when_fetching_update_fails( + handler: Handler, + expected_cause: VersionUpdateGatewayCause, + expected_custom_message: str | None, +) -> None: + transport = httpx.MockTransport(handler) + async with httpx.AsyncClient( + transport=transport, base_url=GITHUB_API_URL + ) as client: + notifier = GitHubVersionUpdateGateway("owner", "repo", client=client) + with pytest.raises(VersionUpdateGatewayError) as excinfo: + await notifier.fetch_update() + + assert excinfo.value.cause == expected_cause + if expected_custom_message is not None: + assert str(excinfo.value) == expected_custom_message diff --git a/tests/update_notifier/test_ui_version_update_notification.py b/tests/update_notifier/test_ui_version_update_notification.py new file mode 100644 index 0000000..91a60ca --- /dev/null +++ b/tests/update_notifier/test_ui_version_update_notification.py @@ -0,0 +1,161 @@ +from __future__ import annotations + +import asyncio +from typing import 
Protocol + +import pytest +from textual.app import Notification + +from vibe.cli.textual_ui.app import VibeApp +from vibe.cli.update_notifier.fake_version_update_gateway import ( + FakeVersionUpdateGateway, +) +from vibe.cli.update_notifier.version_update_gateway import ( + VersionUpdate, + VersionUpdateGatewayCause, + VersionUpdateGatewayError, +) +from vibe.core.config import SessionLoggingConfig, VibeConfig + + +async def _wait_for_notification( + app: VibeApp, pilot, *, timeout: float = 1.0, interval: float = 0.05 +) -> Notification: + loop = asyncio.get_running_loop() + deadline = loop.time() + timeout + + while loop.time() < deadline: + notifications = list(app._notifications) + if notifications: + return notifications[-1] + await pilot.pause(interval) + + pytest.fail("Notification not displayed") + + +async def _assert_no_notifications( + app: VibeApp, pilot, *, timeout: float = 1.0, interval: float = 0.05 +) -> None: + loop = asyncio.get_running_loop() + deadline = loop.time() + timeout + + while loop.time() < deadline: + if app._notifications: + pytest.fail("Notification unexpectedly displayed") + await pilot.pause(interval) + + assert not app._notifications + + +@pytest.fixture +def vibe_config_with_update_checks_enabled() -> VibeConfig: + return VibeConfig( + session_logging=SessionLoggingConfig(enabled=False), enable_update_checks=True + ) + + +class VibeAppFactory(Protocol): + def __call__( + self, + *, + notifier: FakeVersionUpdateGateway, + config: VibeConfig | None = None, + auto_approve: bool = False, + current_version: str = "0.1.0", + ) -> VibeApp: ... 
+ + +@pytest.fixture +def make_vibe_app(vibe_config_with_update_checks_enabled: VibeConfig) -> VibeAppFactory: + def _make_app( + *, + notifier: FakeVersionUpdateGateway, + config: VibeConfig | None = None, + auto_approve: bool = False, + current_version: str = "0.1.0", + ) -> VibeApp: + return VibeApp( + config=config or vibe_config_with_update_checks_enabled, + auto_approve=auto_approve, + version_update_notifier=notifier, + current_version=current_version, + ) + + return _make_app + + +@pytest.mark.asyncio +async def test_ui_displays_update_notification(make_vibe_app: VibeAppFactory) -> None: + notifier = FakeVersionUpdateGateway(update=VersionUpdate(latest_version="0.2.0")) + app = make_vibe_app(notifier=notifier) + + async with app.run_test() as pilot: + notification = await _wait_for_notification(app, pilot, timeout=0.3) + + assert notification.severity == "information" + assert notification.title == "Update available" + assert ( + notification.message + == '0.1.0 => 0.2.0\nRun "uv tool upgrade mistral-vibe" to update' + ) + + +@pytest.mark.asyncio +async def test_ui_does_not_display_update_notification_when_not_available( + make_vibe_app: VibeAppFactory, +) -> None: + notifier = FakeVersionUpdateGateway(update=None) + app = make_vibe_app(notifier=notifier) + + async with app.run_test() as pilot: + await _assert_no_notifications(app, pilot, timeout=0.3) + assert notifier.fetch_update_calls == 1 + + +@pytest.mark.asyncio +async def test_ui_displays_warning_toast_when_check_fails( + make_vibe_app: VibeAppFactory, +) -> None: + notifier = FakeVersionUpdateGateway( + error=VersionUpdateGatewayError(cause=VersionUpdateGatewayCause.FORBIDDEN) + ) + app = make_vibe_app(notifier=notifier) + + async with app.run_test() as pilot: + await pilot.pause(0.3) + notifications = list(app._notifications) + + assert notifications + warning = notifications[-1] + assert warning.severity == "warning" + assert "forbidden" in warning.message.lower() + + +@pytest.mark.asyncio +async 
def test_ui_does_not_invoke_gateway_nor_show_error_notification_when_update_checks_are_disabled( + vibe_config_with_update_checks_enabled: VibeConfig, make_vibe_app: VibeAppFactory +) -> None: + config = vibe_config_with_update_checks_enabled + config.enable_update_checks = False + notifier = FakeVersionUpdateGateway(update=VersionUpdate(latest_version="0.2.0")) + app = make_vibe_app(notifier=notifier, config=config) + + async with app.run_test() as pilot: + await _assert_no_notifications(app, pilot, timeout=0.3) + + assert notifier.fetch_update_calls == 0 + + +@pytest.mark.asyncio +async def test_ui_does_not_invoke_gateway_nor_show_update_notification_when_update_checks_are_disabled( + vibe_config_with_update_checks_enabled: VibeConfig, make_vibe_app: VibeAppFactory +) -> None: + config = vibe_config_with_update_checks_enabled + config.enable_update_checks = False + notifier = FakeVersionUpdateGateway(update=VersionUpdate(latest_version="0.2.0")) + app = make_vibe_app(notifier=notifier, config=config) + + async with app.run_test() as pilot: + await _assert_no_notifications(app, pilot, timeout=0.3) + + assert notifier.fetch_update_calls == 0 diff --git a/tests/update_notifier/test_version_update_use_case.py b/tests/update_notifier/test_version_update_use_case.py new file mode 100644 index 0000000..85e7195 --- /dev/null +++ b/tests/update_notifier/test_version_update_use_case.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +import pytest + +from vibe.cli.update_notifier.fake_version_update_gateway import ( + FakeVersionUpdateGateway, +) +from vibe.cli.update_notifier.version_update import ( + VersionUpdateError, + is_version_update_available, +) +from vibe.cli.update_notifier.version_update_gateway import ( + VersionUpdate, + VersionUpdateGatewayCause, + VersionUpdateGatewayError, +) + + +@pytest.mark.asyncio +async def test_retrieves_the_latest_version_update_when_available() -> None: + latest_update = "1.0.3" + version_update_notifier = 
FakeVersionUpdateGateway( + update=VersionUpdate(latest_version=latest_update) + ) + + update = await is_version_update_available( + version_update_notifier, current_version="1.0.0" + ) + + assert update is not None + assert update.latest_version == latest_update + + +@pytest.mark.asyncio +async def test_retrieves_nothing_when_the_current_version_is_the_latest() -> None: + current_version = "1.0.0" + latest_version = "1.0.0" + version_update_notifier = FakeVersionUpdateGateway( + update=VersionUpdate(latest_version=latest_version) + ) + + update = await is_version_update_available( + version_update_notifier, current_version=current_version + ) + + assert update is None + + +@pytest.mark.asyncio +async def test_retrieves_nothing_when_the_current_version_is_greater_than_the_latest() -> ( + None +): + current_version = "0.2.0" + latest_version = "0.1.2" + version_update_notifier = FakeVersionUpdateGateway( + update=VersionUpdate(latest_version=latest_version) + ) + + update = await is_version_update_available( + version_update_notifier, current_version=current_version + ) + + assert update is None + + +@pytest.mark.asyncio +async def test_retrieves_nothing_when_no_version_is_available() -> None: + version_update_notifier = FakeVersionUpdateGateway(update=None) + + update = await is_version_update_available( + version_update_notifier, current_version="1.0.0" + ) + + assert update is None + + +@pytest.mark.asyncio +async def test_retrieves_nothing_when_latest_version_is_invalid() -> None: + version_update_notifier = FakeVersionUpdateGateway( + update=VersionUpdate(latest_version="invalid-version") + ) + + update = await is_version_update_available( + version_update_notifier, current_version="1.0.0" + ) + + assert update is None + + +@pytest.mark.asyncio +async def test_replaces_hyphens_with_plus_signs_in_latest_version_to_conform_with_PEP_440() -> ( + None +): + version_update_notifier = FakeVersionUpdateGateway( + # if we were not replacing hyphens with plus signs, 
this should fail for PEP 440 + update=VersionUpdate(latest_version="1.6.1-jetbrains") + ) + + update = await is_version_update_available( + version_update_notifier, current_version="1.0.0" + ) + + assert update is not None + assert update.latest_version == "1.6.1-jetbrains" + + +@pytest.mark.asyncio +async def test_retrieves_nothing_when_current_version_is_invalid() -> None: + version_update_notifier = FakeVersionUpdateGateway( + update=VersionUpdate(latest_version="1.0.1") + ) + + update = await is_version_update_available( + version_update_notifier, current_version="invalid-version" + ) + + assert update is None + + +@pytest.mark.parametrize( + ("cause", "expected_message_substring"), + [ + (VersionUpdateGatewayCause.TOO_MANY_REQUESTS, "Rate limit exceeded"), + (VersionUpdateGatewayCause.INVALID_RESPONSE, "invalid response"), + ( + VersionUpdateGatewayCause.NOT_FOUND, + "Unable to fetch the releases. Please check your permissions.", + ), + (VersionUpdateGatewayCause.ERROR_RESPONSE, "Unexpected response"), + (VersionUpdateGatewayCause.REQUEST_FAILED, "Network error"), + ], +) +@pytest.mark.asyncio +async def test_raises_version_update_error( + cause: VersionUpdateGatewayCause, expected_message_substring: str +) -> None: + version_update_notifier = FakeVersionUpdateGateway( + error=VersionUpdateGatewayError(cause=cause) + ) + + with pytest.raises(VersionUpdateError) as excinfo: + await is_version_update_available( + version_update_notifier, current_version="1.0.0" + ) + + assert expected_message_substring in str(excinfo.value) diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000..e2dda2e --- /dev/null +++ b/uv.lock @@ -0,0 +1,1715 @@ +version = 1 +revision = 3 +requires-python = ">=3.12" + +[[package]] +name = "agent-client-protocol" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/c6/fe/147187918c5ba695db537b3088c441bcace4ac9365fae532bf36b1494769/agent_client_protocol-0.6.3.tar.gz", hash = "sha256:ea01a51d5b55864c606401694dad429d83c5bedb476807d81b8208031d6cf3d8", size = 152382, upload-time = "2025-11-03T20:09:19.027Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/2e/62d1770a489d3356cd75e19cd61583e7e411f1b00ab9859c73048621e4c2/agent_client_protocol-0.6.3-py3-none-any.whl", hash = "sha256:184264bd6988731613a49c9eb89d7ecd23c6afffe905c64f1b604a42a9b20aef", size = 47613, upload-time = "2025-11-03T20:09:17.427Z" }, +] + +[[package]] +name = "aiofiles" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/c3/534eac40372d8ee36ef40df62ec129bee4fdb5ad9706e58a29be53b2c970/aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2", size = 46354, upload-time = "2025-10-09T20:51:04.358Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/8a/340a1555ae33d7354dbca4faa54948d76d89a27ceef032c8c3bc661d003e/aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695", size = 14668, upload-time = "2025-10-09T20:51:03.174Z" }, +] + +[[package]] +name = "altgraph" +version = "0.17.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/f8/97fdf103f38fed6792a1601dbc16cc8aac56e7459a9fff08c812d8ae177a/altgraph-0.17.5.tar.gz", hash = "sha256:c87b395dd12fabde9c99573a9749d67da8d29ef9de0125c7f536699b4a9bc9e7", size = 48428, upload-time = "2025-11-21T20:35:50.583Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/ba/000a1996d4308bc65120167c21241a3b205464a2e0b58deda26ae8ac21d1/altgraph-0.17.5-py2.py3-none-any.whl", hash = "sha256:f3a22400bce1b0c701683820ac4f3b159cd301acab067c51c653e06961600597", size = 21228, upload-time = 
"2025-11-21T20:35:49.444Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, 
upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "cfgv" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/b5/721b8799b04bf9afe054a3899c6cf4e880fcf8563cc71c15610242490a0c/cfgv-3.5.0.tar.gz", hash = "sha256:d5b1034354820651caa73ede66a6294d6e95c1b00acc5e9b098e917404669132", size = 7334, upload-time = "2025-11-19T20:55:51.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/cfgv-3.5.0-py2.py3-none-any.whl", hash = "sha256:a8dc6b26ad22ff227d2634a65cb388215ce6cc96bbcc5cfde7641ae87e8dacc0", size = 7445, upload-time = "2025-11-19T20:55:50.744Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = 
"https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = 
"https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, 
upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, 
upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, + { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, + { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, + { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, + { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, + { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, + { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, + { url = 
"https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, + { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, + { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, + { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, + { url = 
"https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, + { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, + { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, + { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" }, + { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, + { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, + { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, + { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, + { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, + { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, + { url = 
"https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, + { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, + { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, + { url = 
"https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, + { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, + { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, + { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, + { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, + { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, +] + +[[package]] +name = "distlib" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, +] + +[[package]] +name = "docutils" +version = "0.22.3" +source = { 
registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d9/02/111134bfeb6e6c7ac4c74594e39a59f6c0195dc4846afbeac3cba60f1927/docutils-0.22.3.tar.gz", hash = "sha256:21486ae730e4ca9f622677b1412b879af1791efcfba517e4c6f60be543fc8cdd", size = 2290153, upload-time = "2025-11-06T02:35:55.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/a8/c6a4b901d17399c77cd81fb001ce8961e9f5e04d3daf27e8925cb012e163/docutils-0.22.3-py3-none-any.whl", hash = "sha256:bd772e4aca73aff037958d44f2be5229ded4c09927fcf8690c577b66234d6ceb", size = 633032, upload-time = "2025-11-06T02:35:52.391Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "execnet" +version = "2.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = 
"sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, +] + +[[package]] +name = "filelock" +version = "3.20.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/46/0028a82567109b5ef6e4d2a1f04a583fb513e6cf9527fcdd09afd817deeb/filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4", size = 18922, upload-time = "2025-10-08T18:03:50.056Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2", size = 16054, upload-time = "2025-10-08T18:03:48.35Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { 
url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "id" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/22/11/102da08f88412d875fa2f1a9a469ff7ad4c874b0ca6fed0048fe385bdb3d/id-1.5.0.tar.gz", hash = "sha256:292cb8a49eacbbdbce97244f47a97b4c62540169c976552e497fd57df0734c1d", size = 15237, upload-time = "2024-12-04T19:53:05.575Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9f/cb/18326d2d89ad3b0dd143da971e77afd1e6ca6674f1b1c3df4b6bec6279fc/id-1.5.0-py3-none-any.whl", hash = "sha256:f1434e1cef91f2cbb8a4ec64663d5a23b9ed43ef44c4c957d02583d61714c658", size = 13611, upload-time = "2024-12-04T19:53:03.02Z" }, +] + +[[package]] +name = "identify" +version = "2.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/e7/685de97986c916a6d93b3876139e00eef26ad5bbbd61925d670ae8013449/identify-2.6.15.tar.gz", hash = "sha256:e4f4864b96c6557ef2a1e1c951771838f4edc9df3a72ec7118b338801b11c7bf", size = 99311, upload-time = "2025-10-02T17:43:40.631Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/1c/e5fd8f973d4f375adb21565739498e2e9a1e54c858a97b9a8ccfdc81da9b/identify-2.6.15-py2.py3-none-any.whl", hash = "sha256:1181ef7608e00704db228516541eb83a88a9f94433a8c80bb9b5bd54b1d81757", size = 99183, upload-time = "2025-10-02T17:43:39.137Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version 
= "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "invoke" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, +] + +[[package]] +name = "jaraco-classes" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/c0/ed4a27bc5571b99e3cff68f8a9fa5b56ff7df1c2251cc715a652ddd26402/jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd", size = 11780, upload-time = "2024-03-31T07:27:36.643Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/66/b15ce62552d84bbfcec9a4873ab79d993a1dd4edb922cbfccae192bd5b5f/jaraco.classes-3.4.0-py3-none-any.whl", hash = 
"sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790", size = 6777, upload-time = "2024-03-31T07:27:34.792Z" }, +] + +[[package]] +name = "jaraco-context" +version = "6.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/ad/f3777b81bf0b6e7bc7514a1656d3e637b2e8e15fab2ce3235730b3e7a4e6/jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3", size = 13912, upload-time = "2024-08-20T03:39:27.358Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/db/0c52c4cf5e4bd9f5d7135ec7669a3a767af21b3a308e1ed3674881e52b62/jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4", size = 6825, upload-time = "2024-08-20T03:39:25.966Z" }, +] + +[[package]] +name = "jaraco-functools" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "more-itertools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/ed/1aa2d585304ec07262e1a83a9889880701079dde796ac7b1d1826f40c63d/jaraco_functools-4.3.0.tar.gz", hash = "sha256:cfd13ad0dd2c47a3600b439ef72d8615d482cedcff1632930d6f28924d92f294", size = 19755, upload-time = "2025-08-18T20:05:09.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/09/726f168acad366b11e420df31bf1c702a54d373a83f968d94141a8c3fde0/jaraco_functools-4.3.0-py3-none-any.whl", hash = "sha256:227ff8ed6f7b8f62c56deff101545fa7543cf2c8e7b82a7c2116e672f29c26e8", size = 10408, upload-time = "2025-08-18T20:05:08.69Z" }, +] + +[[package]] +name = "jeepney" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7b/6f/357efd7602486741aa73ffc0617fb310a29b588ed0fd69c2399acbb85b0c/jeepney-0.9.0.tar.gz", hash = "sha256:cf0e9e845622b81e4a28df94c40345400256ec608d0e55bb8a3feaa9163f5732", size = 106758, upload-time = 
"2025-02-27T18:51:01.684Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/a3/e137168c9c44d18eff0376253da9f1e9234d0239e0ee230d2fee6cea8e55/jeepney-0.9.0-py3-none-any.whl", hash = "sha256:97e5714520c16fc0a45695e5365a2e11b81ea79bba796e26f9f1d178cb182683", size = 49010, upload-time = "2025-02-27T18:51:00.104Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" 
+source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "keyring" +version = "25.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jaraco-classes" }, + { name = "jaraco-context" }, + { name = "jaraco-functools" }, + { name = "jeepney", marker = "sys_platform == 'linux'" }, + { name = "pywin32-ctypes", marker = "sys_platform == 'win32'" }, + { name = "secretstorage", marker = "sys_platform == 'linux'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/4b/674af6ef2f97d56f0ab5153bf0bfa28ccb6c3ed4d1babf4305449668807b/keyring-25.7.0.tar.gz", hash = "sha256:fe01bd85eb3f8fb3dd0405defdeac9a5b4f6f0439edbb3149577f244a2e8245b", size = 63516, upload-time = "2025-11-16T16:26:09.482Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/db/e655086b7f3a705df045bf0933bdd9c2f79bb3c97bfef1384598bb79a217/keyring-25.7.0-py3-none-any.whl", hash = "sha256:be4a0b195f149690c166e850609a477c532ddbfbaed96a404d4e43f8d5e2689f", size = 39160, upload-time = "2025-11-16T16:26:08.402Z" }, +] + +[[package]] +name = "linkify-it-py" +version = "2.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "uc-micro-py" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" }, +] + +[[package]] +name = "macholib" +version = "1.16.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "altgraph" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/10/2f/97589876ea967487978071c9042518d28b958d87b17dceb7cdc1d881f963/macholib-1.16.4.tar.gz", hash = "sha256:f408c93ab2e995cd2c46e34fe328b130404be143469e41bc366c807448979362", size = 59427, upload-time = "2025-11-22T08:28:38.373Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/d1/a9f36f8ecdf0fb7c9b1e78c8d7af12b8c8754e74851ac7b94a8305540fc7/macholib-1.16.4-py2.py3-none-any.whl", hash = "sha256:da1a3fa8266e30f0ce7e97c6a54eefaae8edd1e5f86f3eb8b95457cae90265ea", size = 38117, upload-time = "2025-11-22T08:28:36.939Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = 
"sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, +] + +[package.optional-dependencies] +linkify = [ + { name = "linkify-it-py" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" }, + { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" }, + { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" }, + { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" }, + { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" }, + { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" }, + { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" }, + { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" }, + { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, + { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { 
url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, + { url = "https://files.pythonhosted.org/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, + { url = "https://files.pythonhosted.org/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, + { url = "https://files.pythonhosted.org/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, + { url = "https://files.pythonhosted.org/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, + { url = "https://files.pythonhosted.org/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, + { url = "https://files.pythonhosted.org/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, + { url = "https://files.pythonhosted.org/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, + { url = "https://files.pythonhosted.org/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, + { url = "https://files.pythonhosted.org/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, + { url = "https://files.pythonhosted.org/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, + 
{ url = "https://files.pythonhosted.org/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, + { url = "https://files.pythonhosted.org/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, +] + +[[package]] +name = "mcp" +version = "1.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/1a/9c8a5362e3448d585081d6c7aa95898a64e0ac59d3e26169ae6c3ca5feaf/mcp-1.23.0.tar.gz", hash = "sha256:84e0c29316d0a8cf0affd196fd000487ac512aa3f771b63b2ea864e22961772b", size = 596506, upload-time = "2025-12-02T13:40:02.558Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/b2/28739ce409f98159c0121eab56e69ad71546c4f34ac8b42e58c03f57dccc/mcp-1.23.0-py3-none-any.whl", hash = 
"sha256:5a645cf111ed329f4619f2629a3f15d9aabd7adc2ea09d600d31467b51ecb64f", size = 231427, upload-time = "2025-12-02T13:40:00.738Z" }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "mistral-vibe" +version = "1.0.0" +source = { editable = "." 
} +dependencies = [ + { name = "agent-client-protocol" }, + { name = "aiofiles" }, + { name = "httpx" }, + { name = "mcp" }, + { name = "mistralai" }, + { name = "packaging" }, + { name = "pexpect" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "pyperclip" }, + { name = "pytest-xdist" }, + { name = "python-dotenv" }, + { name = "rich" }, + { name = "textual" }, + { name = "tomli-w" }, + { name = "watchfiles" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pre-commit" }, + { name = "pyinstaller" }, + { name = "pyright" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-textual-snapshot" }, + { name = "pytest-timeout" }, + { name = "respx" }, + { name = "ruff" }, + { name = "twine" }, + { name = "typos" }, + { name = "vulture" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-client-protocol", specifier = "==0.6.3" }, + { name = "aiofiles", specifier = ">=24.1.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "mcp", specifier = ">=1.14.0" }, + { name = "mistralai", specifier = "==1.9.11" }, + { name = "packaging", specifier = ">=24.1" }, + { name = "pexpect", specifier = ">=4.9.0" }, + { name = "pydantic", specifier = ">=2.12.4" }, + { name = "pydantic-settings", specifier = ">=2.12.0" }, + { name = "pyperclip", specifier = ">=1.11.0" }, + { name = "pytest-xdist", specifier = ">=3.8.0" }, + { name = "python-dotenv", specifier = ">=1.0.0" }, + { name = "rich", specifier = ">=14.0.0" }, + { name = "textual", specifier = ">=1.0.0" }, + { name = "tomli-w", specifier = ">=1.2.0" }, + { name = "watchfiles", specifier = ">=1.1.1" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pre-commit", specifier = ">=4.2.0" }, + { name = "pyinstaller", specifier = ">=6.17.0" }, + { name = "pyright", specifier = ">=1.1.403" }, + { name = "pytest", specifier = ">=8.3.5" }, + { name = "pytest-asyncio", specifier = ">=1.2.0" }, + { name = "pytest-textual-snapshot", specifier = ">=1.1.0" }, 
+ { name = "pytest-timeout", specifier = ">=2.4.0" }, + { name = "respx", specifier = ">=0.22.0" }, + { name = "ruff", specifier = ">=0.14.5" }, + { name = "twine", specifier = ">=5.0.0" }, + { name = "typos", specifier = ">=1.34.0" }, + { name = "vulture", specifier = ">=2.14" }, +] + +[[package]] +name = "mistralai" +version = "1.9.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eval-type-backport" }, + { name = "httpx" }, + { name = "invoke" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "pyyaml" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/8d/d8b7af67a966b6f227024e1cb7287fc19901a434f87a5a391dcfe635d338/mistralai-1.9.11.tar.gz", hash = "sha256:3df9e403c31a756ec79e78df25ee73cea3eb15f86693773e16b16adaf59c9b8a", size = 208051, upload-time = "2025-10-02T15:53:40.473Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/76/4ce12563aea5a76016f8643eff30ab731e6656c845e9e4d090ef10c7b925/mistralai-1.9.11-py3-none-any.whl", hash = "sha256:7a3dc2b8ef3fceaa3582220234261b5c4e3e03a972563b07afa150e44a25a6d3", size = 442796, upload-time = "2025-10-02T15:53:39.134Z" }, +] + +[[package]] +name = "more-itertools" +version = "10.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/5d/38b681d3fce7a266dd9ab73c66959406d565b3e85f21d5e66e1181d93721/more_itertools-10.8.0.tar.gz", hash = "sha256:f638ddf8a1a0d134181275fb5d58b086ead7c6a72429ad725c67503f13ba30bd", size = 137431, upload-time = "2025-09-02T15:23:11.018Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/8e/469e5a4a2f5855992e425f3cb33804cc07bf18d48f2db061aec61ce50270/more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b", size = 69667, upload-time = "2025-09-02T15:23:09.635Z" }, +] + +[[package]] +name = "nh3" +version = "0.3.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/a5/34c26015d3a434409f4d2a1cd8821a06c05238703f49283ffeb937bef093/nh3-0.3.2.tar.gz", hash = "sha256:f394759a06df8b685a4ebfb1874fb67a9cbfd58c64fc5ed587a663c0e63ec376", size = 19288, upload-time = "2025-10-30T11:17:45.948Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5b/01/a1eda067c0ba823e5e2bb033864ae4854549e49fb6f3407d2da949106bfb/nh3-0.3.2-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d18957a90806d943d141cc5e4a0fefa1d77cf0d7a156878bf9a66eed52c9cc7d", size = 1419839, upload-time = "2025-10-30T11:17:09.956Z" }, + { url = "https://files.pythonhosted.org/packages/30/57/07826ff65d59e7e9cc789ef1dc405f660cabd7458a1864ab58aefa17411b/nh3-0.3.2-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45c953e57028c31d473d6b648552d9cab1efe20a42ad139d78e11d8f42a36130", size = 791183, upload-time = "2025-10-30T11:17:11.99Z" }, + { url = "https://files.pythonhosted.org/packages/af/2f/e8a86f861ad83f3bb5455f596d5c802e34fcdb8c53a489083a70fd301333/nh3-0.3.2-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c9850041b77a9147d6bbd6dbbf13eeec7009eb60b44e83f07fcb2910075bf9b", size = 829127, upload-time = "2025-10-30T11:17:13.192Z" }, + { url = "https://files.pythonhosted.org/packages/d8/97/77aef4daf0479754e8e90c7f8f48f3b7b8725a3b8c0df45f2258017a6895/nh3-0.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:403c11563e50b915d0efdb622866d1d9e4506bce590ef7da57789bf71dd148b5", size = 997131, upload-time = "2025-10-30T11:17:14.677Z" }, + { url = "https://files.pythonhosted.org/packages/41/ee/fd8140e4df9d52143e89951dd0d797f5546004c6043285289fbbe3112293/nh3-0.3.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:0dca4365db62b2d71ff1620ee4f800c4729849906c5dd504ee1a7b2389558e31", size = 1068783, upload-time = "2025-10-30T11:17:15.861Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/64/bdd9631779e2d588b08391f7555828f352e7f6427889daf2fa424bfc90c9/nh3-0.3.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0fe7ee035dd7b2290715baf29cb27167dddd2ff70ea7d052c958dbd80d323c99", size = 994732, upload-time = "2025-10-30T11:17:17.155Z" }, + { url = "https://files.pythonhosted.org/packages/79/66/90190033654f1f28ca98e3d76b8be1194505583f9426b0dcde782a3970a2/nh3-0.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a40202fd58e49129764f025bbaae77028e420f1d5b3c8e6f6fd3a6490d513868", size = 975997, upload-time = "2025-10-30T11:17:18.77Z" }, + { url = "https://files.pythonhosted.org/packages/34/30/ebf8e2e8d71fdb5a5d5d8836207177aed1682df819cbde7f42f16898946c/nh3-0.3.2-cp314-cp314t-win32.whl", hash = "sha256:1f9ba555a797dbdcd844b89523f29cdc90973d8bd2e836ea6b962cf567cadd93", size = 583364, upload-time = "2025-10-30T11:17:20.286Z" }, + { url = "https://files.pythonhosted.org/packages/94/ae/95c52b5a75da429f11ca8902c2128f64daafdc77758d370e4cc310ecda55/nh3-0.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:dce4248edc427c9b79261f3e6e2b3ecbdd9b88c267012168b4a7b3fc6fd41d13", size = 589982, upload-time = "2025-10-30T11:17:21.384Z" }, + { url = "https://files.pythonhosted.org/packages/b4/bd/c7d862a4381b95f2469704de32c0ad419def0f4a84b7a138a79532238114/nh3-0.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:019ecbd007536b67fdf76fab411b648fb64e2257ca3262ec80c3425c24028c80", size = 577126, upload-time = "2025-10-30T11:17:22.755Z" }, + { url = "https://files.pythonhosted.org/packages/b6/3e/f5a5cc2885c24be13e9b937441bd16a012ac34a657fe05e58927e8af8b7a/nh3-0.3.2-cp38-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7064ccf5ace75825bd7bf57859daaaf16ed28660c1c6b306b649a9eda4b54b1e", size = 1431980, upload-time = "2025-10-30T11:17:25.457Z" }, + { url = 
"https://files.pythonhosted.org/packages/7f/f7/529a99324d7ef055de88b690858f4189379708abae92ace799365a797b7f/nh3-0.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8745454cdd28bbbc90861b80a0111a195b0e3961b9fa2e672be89eb199fa5d8", size = 820805, upload-time = "2025-10-30T11:17:26.98Z" }, + { url = "https://files.pythonhosted.org/packages/3d/62/19b7c50ccd1fa7d0764822d2cea8f2a320f2fd77474c7a1805cb22cf69b0/nh3-0.3.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72d67c25a84579f4a432c065e8b4274e53b7cf1df8f792cf846abfe2c3090866", size = 803527, upload-time = "2025-10-30T11:17:28.284Z" }, + { url = "https://files.pythonhosted.org/packages/4a/ca/f022273bab5440abff6302731a49410c5ef66b1a9502ba3fbb2df998d9ff/nh3-0.3.2-cp38-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:13398e676a14d6233f372c75f52d5ae74f98210172991f7a3142a736bd92b131", size = 1051674, upload-time = "2025-10-30T11:17:29.909Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f7/5728e3b32a11daf5bd21cf71d91c463f74305938bc3eb9e0ac1ce141646e/nh3-0.3.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03d617e5c8aa7331bd2659c654e021caf9bba704b109e7b2b28b039a00949fe5", size = 1004737, upload-time = "2025-10-30T11:17:31.205Z" }, + { url = "https://files.pythonhosted.org/packages/53/7f/f17e0dba0a99cee29e6cee6d4d52340ef9cb1f8a06946d3a01eb7ec2fb01/nh3-0.3.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f55c4d2d5a207e74eefe4d828067bbb01300e06e2a7436142f915c5928de07", size = 911745, upload-time = "2025-10-30T11:17:32.945Z" }, + { url = "https://files.pythonhosted.org/packages/42/0f/c76bf3dba22c73c38e9b1113b017cf163f7696f50e003404ec5ecdb1e8a6/nh3-0.3.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb18403f02b655a1bbe4e3a4696c2ae1d6ae8f5991f7cacb684b1ae27e6c9f7", size = 797184, upload-time = "2025-10-30T11:17:34.226Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/a1/73d8250f888fb0ddf1b119b139c382f8903d8bb0c5bd1f64afc7e38dad1d/nh3-0.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d66f41672eb4060cf87c037f760bdbc6847852ca9ef8e9c5a5da18f090abf87", size = 838556, upload-time = "2025-10-30T11:17:35.875Z" }, + { url = "https://files.pythonhosted.org/packages/d1/09/deb57f1fb656a7a5192497f4a287b0ade5a2ff6b5d5de4736d13ef6d2c1f/nh3-0.3.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f97f8b25cb2681d25e2338148159447e4d689aafdccfcf19e61ff7db3905768a", size = 1006695, upload-time = "2025-10-30T11:17:37.071Z" }, + { url = "https://files.pythonhosted.org/packages/b6/61/8f4d41c4ccdac30e4b1a4fa7be4b0f9914d8314a5058472f84c8e101a418/nh3-0.3.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:2ab70e8c6c7d2ce953d2a58102eefa90c2d0a5ed7aa40c7e29a487bc5e613131", size = 1075471, upload-time = "2025-10-30T11:17:38.225Z" }, + { url = "https://files.pythonhosted.org/packages/b0/c6/966aec0cb4705e69f6c3580422c239205d5d4d0e50fac380b21e87b6cf1b/nh3-0.3.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:1710f3901cd6440ca92494ba2eb6dc260f829fa8d9196b659fa10de825610ce0", size = 1002439, upload-time = "2025-10-30T11:17:39.553Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c8/97a2d5f7a314cce2c5c49f30c6f161b7f3617960ade4bfc2fd1ee092cb20/nh3-0.3.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:91e9b001101fb4500a2aafe3e7c92928d85242d38bf5ac0aba0b7480da0a4cd6", size = 987439, upload-time = "2025-10-30T11:17:40.81Z" }, + { url = "https://files.pythonhosted.org/packages/0d/95/2d6fc6461687d7a171f087995247dec33e8749a562bfadd85fb5dbf37a11/nh3-0.3.2-cp38-abi3-win32.whl", hash = "sha256:169db03df90da63286e0560ea0efa9b6f3b59844a9735514a1d47e6bb2c8c61b", size = 589826, upload-time = "2025-10-30T11:17:42.239Z" }, + { url = "https://files.pythonhosted.org/packages/64/9a/1a1c154f10a575d20dd634e5697805e589bbdb7673a0ad00e8da90044ba7/nh3-0.3.2-cp38-abi3-win_amd64.whl", hash = 
"sha256:562da3dca7a17f9077593214a9781a94b8d76de4f158f8c895e62f09573945fe", size = 596406, upload-time = "2025-10-30T11:17:43.773Z" }, + { url = "https://files.pythonhosted.org/packages/9e/7e/a96255f63b7aef032cbee8fc4d6e37def72e3aaedc1f72759235e8f13cb1/nh3-0.3.2-cp38-abi3-win_arm64.whl", hash = "sha256:cf5964d54edd405e68583114a7cba929468bcd7db5e676ae38ee954de1cfc104", size = 584162, upload-time = "2025-10-30T11:17:44.96Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pefile" +version = "2024.8.26" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/03/4f/2750f7f6f025a1507cd3b7218691671eecfd0bbebebe8b39aa0fe1d360b8/pefile-2024.8.26.tar.gz", hash = "sha256:3ff6c5d8b43e8c37bb6e6dd5085658d658a7a0bdcd20b6a07b1fcfc1c4e9d632", size = 76008, upload-time = "2024-08-26T20:58:38.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/16/12b82f791c7f50ddec566873d5bdd245baa1491bac11d15ffb98aecc8f8b/pefile-2024.8.26-py3-none-any.whl", hash = "sha256:76f8b485dcd3b1bb8166f1128d395fa3d87af26360c2358fb75b80019b957c6f", size = 74766, upload-time = "2024-08-26T21:01:02.632Z" }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/61/33/9611380c2bdb1225fdef633e2a9610622310fed35ab11dac9620972ee088/platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312", size = 21632, upload-time = "2025-10-08T17:44:48.791Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/cb/ac7874b3e5d58441674fb70742e6c374b28b0c7cb988d37d991cde47166c/platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3", size = 18651, 
upload-time = "2025-10-08T17:44:47.223Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pre-commit" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/9b/6a4ffb4ed980519da959e1cf3122fc6cb41211daa58dbae1c73c0e519a37/pre_commit-4.5.0.tar.gz", hash = "sha256:dc5a065e932b19fc1d4c653c6939068fe54325af8e741e74e88db4d28a4dd66b", size = 198428, upload-time = "2025-11-22T21:02:42.304Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/c4/b2d28e9d2edf4f1713eb3c29307f1a63f3d67cf09bdda29715a36a68921a/pre_commit-4.5.0-py2.py3-none-any.whl", hash = "sha256:25e2ce09595174d9c97860a95609f9f852c0614ba602de3561e267547f2335e1", size = 226429, upload-time = "2025-11-22T21:02:40.836Z" }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } +wheels = [ 
+ { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, 
+ { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = 
"2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = 
"2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyinstaller" +version = "6.17.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "altgraph" }, + { name = "macholib", marker = "sys_platform == 'darwin'" }, + { name = "packaging" }, + { name = "pefile", marker = "sys_platform == 'win32'" }, + { name = "pyinstaller-hooks-contrib" }, + { name = "pywin32-ctypes", marker = "sys_platform == 'win32'" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/80/9e0dad9c69a7cfd4b5aaede8c6225d762bab7247a2a6b7651e1995522001/pyinstaller-6.17.0.tar.gz", hash = "sha256:be372bd911392b88277e510940ac32a5c2a6ce4b8d00a311c78fa443f4f27313", size = 4014147, upload-time = "2025-11-24T19:43:32.109Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/f5/37e419d84d5284ecab11ef8b61306a3b978fe6f0fd69a9541e16bfd72e65/pyinstaller-6.17.0-py3-none-macosx_10_13_universal2.whl", hash = "sha256:4e446b8030c6e5a2f712e3f82011ecf6c7ead86008357b0d23a0ec4bcde31dac", size = 1031880, upload-time = "2025-11-24T19:42:30.862Z" }, + { url = "https://files.pythonhosted.org/packages/9e/b6/2e184879ab9cf90a1d2867fdd34d507c4d246b3cc52ca05aad00bfc70ee7/pyinstaller-6.17.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:aa9fd87aaa28239c6f0d0210114029bd03f8cac316a90bab071a5092d7c85ad7", size = 731968, upload-time = "2025-11-24T19:42:35.421Z" }, + { url = "https://files.pythonhosted.org/packages/40/76/f529de98f7e5cce7904c19b224990003fc2267eda2ee5fdd8452acb420a9/pyinstaller-6.17.0-py3-none-manylinux2014_i686.whl", hash = "sha256:060b122e43e7c0b23e759a4153be34bd70914135ab955bb18a67181e0dca85a2", size = 743217, upload-time = "2025-11-24T19:42:39.286Z" }, + { url = "https://files.pythonhosted.org/packages/a3/10/c02bfbb050cafc4c353cf69baf95407e211e1372bd286ab5ce5cbc13a30a/pyinstaller-6.17.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:cd213d1a545c97dfe4a3c40e8213ff7c5127fc115c49229f27a3fa541503444b", size = 741119, upload-time = "2025-11-24T19:42:43.12Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/9d/69fdacfd9335695f5900a376cfe3e4aed28f0720ffc15fee81fdb9d920bc/pyinstaller-6.17.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:89c0d18ba8b62c6607abd8cf2299ae5ffa5c36d8c47f39608ce8c3f357f6099f", size = 738111, upload-time = "2025-11-24T19:42:46.97Z" }, + { url = "https://files.pythonhosted.org/packages/5e/1e/e8e36e1568f6865ac706c6e1f875c1a346ddaa9f9a8f923d66545d2240ed/pyinstaller-6.17.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2a147b83cdebb07855bd5a663600891550062373a2ca375c58eacead33741a27", size = 737795, upload-time = "2025-11-24T19:42:50.675Z" }, + { url = "https://files.pythonhosted.org/packages/8d/15/9dc0f81ccb746c27bfa6ee53164422fe47ee079c7a717d9c4791aba78797/pyinstaller-6.17.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:f8cfbbfa6708e54fb936df6dd6eafaf133e84efb0d2fe25b91cfeefa793c4ca4", size = 736891, upload-time = "2025-11-24T19:42:54.458Z" }, + { url = "https://files.pythonhosted.org/packages/97/e6/bed54821c1ebe1275c559661d3e7bfa23c406673b515252dfbf89db56c65/pyinstaller-6.17.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:97f4c1942f7b4cd73f9e38b49cc8f5f8a6fbb44922cb60dd3073a189b77ee1ae", size = 736752, upload-time = "2025-11-24T19:42:58.144Z" }, + { url = "https://files.pythonhosted.org/packages/c7/84/897d759198676b910d69d42640b6d25d50b449f2209e18127a974cf59dbe/pyinstaller-6.17.0-py3-none-win32.whl", hash = "sha256:ce0be227a037fd4be672226db709088565484f597d6b230bceec19850fdd4c85", size = 1317851, upload-time = "2025-11-24T19:43:04.361Z" }, + { url = "https://files.pythonhosted.org/packages/2d/f5/6a122efe024433ecc34aab6f499e0bd2bbe059c639b77b0045aa2421b0bf/pyinstaller-6.17.0-py3-none-win_amd64.whl", hash = "sha256:b019940dbf7a01489d6b26f9fb97db74b504e0a757010f7ad078675befc85a82", size = 1378685, upload-time = "2025-11-24T19:43:10.395Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/96/14991773c9e599707a53594429ccf372f9ee638df3b7d26b65fd1a7433f0/pyinstaller-6.17.0-py3-none-win_arm64.whl", hash = "sha256:3c92a335e338170df7e615f75279cfeea97ade89e6dd7694943c8c185460f7b7", size = 1320032, upload-time = "2025-11-24T19:43:16.388Z" }, +] + +[[package]] +name = "pyinstaller-hooks-contrib" +version = "2025.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/4f/e33132acdb8f732978e577b8a0130a412cbfe7a3414605e3fd380a975522/pyinstaller_hooks_contrib-2025.10.tar.gz", hash = "sha256:a1a737e5c0dccf1cf6f19a25e2efd109b9fec9ddd625f97f553dac16ee884881", size = 168155, upload-time = "2025-11-22T09:34:36.138Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/de/a7688eed49a1d3df337cdaa4c0d64e231309a52f269850a72051975e3c4a/pyinstaller_hooks_contrib-2025.10-py3-none-any.whl", hash = "sha256:aa7a378518772846221f63a84d6306d9827299323243db890851474dfd1231a9", size = 447760, upload-time = "2025-11-22T09:34:34.753Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pyperclip" +version = "1.11.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/52/d87eba7cb129b81563019d1679026e7a112ef76855d6159d24754dbd2a51/pyperclip-1.11.0.tar.gz", hash = "sha256:244035963e4428530d9e3a6101a1ef97209c6825edab1567beac148ccc1db1b6", size = 12185, upload-time = "2025-09-26T14:40:37.245Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/80/fc9d01d5ed37ba4c42ca2b55b4339ae6e200b456be3a1aaddf4a9fa99b8c/pyperclip-1.11.0-py3-none-any.whl", hash = "sha256:299403e9ff44581cb9ba2ffeed69c7aa96a008622ad0c46cb575ca75b5b84273", size = 11063, upload-time = "2025-09-26T14:40:36.069Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.407" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/1b/0aa08ee42948b61745ac5b5b5ccaec4669e8884b53d31c8ec20b2fcd6b6f/pyright-1.1.407.tar.gz", hash = "sha256:099674dba5c10489832d4a4b2d302636152a9a42d317986c38474c76fe562262", size = 4122872, upload-time = "2025-10-24T23:17:15.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/93/b69052907d032b00c40cb656d21438ec00b3a471733de137a3f65a49a0a0/pyright-1.1.407-py3-none-any.whl", hash = "sha256:6dd419f54fcc13f03b52285796d65e639786373f433e243f8b94cf93a7444d21", size = 5997008, upload-time = "2025-10-24T23:17:13.159Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { 
url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, +] + +[[package]] +name = "pytest-textual-snapshot" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "pytest" }, + { name = "rich" }, + { name = "syrupy" }, + { name = "textual" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/7f/4135f87e12c1c46376971fec5ebfe71f7f8b15ac20f887c90932dedd6e78/pytest_textual_snapshot-1.1.0.tar.gz", hash = "sha256:96d48ab01306852a3b4ae165f008d5fdd7fda777e91e9d2c3ea0f7d7458544eb", size = 11391, upload-time = "2025-01-23T16:12:00.537Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/30/c31d800f8d40d663fc84d83548b26aecf613c9c39bd6985c813d623d7b84/pytest_textual_snapshot-1.1.0-py3-none-any.whl", hash = "sha256:fdf7727d2bc444f947554308da1b08df7a45215fe49d0621cbbc24c33e8f7b8d", size = 11451, upload-time = "2025-01-23T16:11:59.389Z" 
}, +] + +[[package]] +name = "pytest-timeout" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/82/4c9ecabab13363e72d880f2fb504c5f750433b2b6f16e99f4ec21ada284c/pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a", size = 17973, upload-time = "2025-05-05T19:44:34.99Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" }, +] + +[[package]] +name = "pytest-xdist" +version = "3.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = 
"2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = 
"sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pywin32-ctypes" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/9f/01a1a99704853cb63f253eea009390c88e7131c67e66a0a02099a8c917cb/pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755", size = 29471, upload-time = "2024-08-14T10:15:34.626Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/3d/8161f7711c017e01ac9f008dfddd9410dff3674334c233bde66e7ba65bbf/pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8", size = 30756, upload-time = "2024-08-14T10:15:33.187Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = 
"https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url 
= "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "readme-renderer" +version = "44.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "nh3" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/a9/104ec9234c8448c4379768221ea6df01260cd6c2ce13182d4eac531c8342/readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1", size = 32056, upload-time = "2024-07-08T15:00:57.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/67/921ec3024056483db83953ae8e48079ad62b92db7880013ca77632921dd0/readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151", size = 13310, upload-time = "2024-07-08T15:00:56.577Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, +] + +[[package]] +name = "respx" +version = "0.22.0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "httpx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/7c/96bd0bc759cf009675ad1ee1f96535edcb11e9666b985717eb8c87192a95/respx-0.22.0.tar.gz", hash = "sha256:3c8924caa2a50bd71aefc07aa812f2466ff489f1848c96e954a5362d17095d91", size = 28439, upload-time = "2024-12-19T22:33:59.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/67/afbb0978d5399bc9ea200f1d4489a23c9a1dad4eee6376242b8182389c79/respx-0.22.0-py2.py3-none-any.whl", hash = "sha256:631128d4c9aba15e56903fb5f66fb1eff412ce28dd387ca3a81339e52dbd3ad0", size = 25127, upload-time = "2024-12-19T22:33:57.837Z" }, +] + +[[package]] +name = "rfc3986" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/40/1520d68bfa07ab5a6f065a186815fb6610c86fe957bc065754e47f7b0840/rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c", size = 49026, upload-time = "2022-01-10T00:52:30.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/9a/9afaade874b2fa6c752c36f1548f718b5b83af81ed9b76628329dab81c1b/rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd", size = 31326, upload-time = "2022-01-10T00:52:29.594Z" }, +] + +[[package]] +name = "rich" +version = "14.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = 
"sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = "https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://files.pythonhosted.org/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, + { url = "https://files.pythonhosted.org/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = "2025-11-30T20:22:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = "https://files.pythonhosted.org/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://files.pythonhosted.org/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = "https://files.pythonhosted.org/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", 
hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, + { url = "https://files.pythonhosted.org/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, + { url = "https://files.pythonhosted.org/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, + { url = "https://files.pythonhosted.org/packages/e0/af/5ab4833eadc36c0a8ed2bc5c0de0493c04f6c06de223170bd0798ff98ced/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2", size = 414589, upload-time = "2025-11-30T20:22:48.872Z" }, + { url = "https://files.pythonhosted.org/packages/b7/de/f7192e12b21b9e9a68a6d0f249b4af3fdcdff8418be0767a627564afa1f1/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6", size = 394025, upload-time = "2025-11-30T20:22:50.196Z" }, + { url = "https://files.pythonhosted.org/packages/91/c4/fc70cd0249496493500e7cc2de87504f5aa6509de1e88623431fec76d4b6/rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = 
"sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e", size = 408895, upload-time = "2025-11-30T20:22:51.87Z" }, + { url = "https://files.pythonhosted.org/packages/58/95/d9275b05ab96556fefff73a385813eb66032e4c99f411d0795372d9abcea/rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d", size = 422799, upload-time = "2025-11-30T20:22:53.341Z" }, + { url = "https://files.pythonhosted.org/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, + { url = "https://files.pythonhosted.org/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = "https://files.pythonhosted.org/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://files.pythonhosted.org/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { 
url = "https://files.pythonhosted.org/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://files.pythonhosted.org/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://files.pythonhosted.org/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, + { url = "https://files.pythonhosted.org/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, + { url = "https://files.pythonhosted.org/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/55/fa3b9cf31d0c963ecf1ba777f7cf4b2a2c976795ac430d24a1f43d25a6ba/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa", size = 408149, upload-time = "2025-11-30T20:23:12.864Z" }, + { url = "https://files.pythonhosted.org/packages/60/ca/780cf3b1a32b18c0f05c441958d3758f02544f1d613abf9488cd78876378/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083", size = 383843, upload-time = "2025-11-30T20:23:14.638Z" }, + { url = "https://files.pythonhosted.org/packages/82/86/d5f2e04f2aa6247c613da0c1dd87fcd08fa17107e858193566048a1e2f0a/rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9", size = 396507, upload-time = "2025-11-30T20:23:16.105Z" }, + { url = "https://files.pythonhosted.org/packages/4b/9a/453255d2f769fe44e07ea9785c8347edaf867f7026872e76c1ad9f7bed92/rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0", size = 414949, upload-time = "2025-11-30T20:23:17.539Z" }, + { url = "https://files.pythonhosted.org/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, + { url = "https://files.pythonhosted.org/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = "https://files.pythonhosted.org/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = "https://files.pythonhosted.org/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, + { url = "https://files.pythonhosted.org/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, + { url = "https://files.pythonhosted.org/packages/5b/3c/2882bdac942bd2172f3da574eab16f309ae10a3925644e969536553cb4ee/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18", size = 408005, upload-time = "2025-11-30T20:23:35.253Z" }, + { url = "https://files.pythonhosted.org/packages/ce/81/9a91c0111ce1758c92516a3e44776920b579d9a7c09b2b06b642d4de3f0f/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad", size = 382112, upload-time = "2025-11-30T20:23:36.842Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8e/1da49d4a107027e5fbc64daeab96a0706361a2918da10cb41769244b805d/rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07", size = 399049, upload-time = "2025-11-30T20:23:38.343Z" }, + { url = "https://files.pythonhosted.org/packages/df/5a/7ee239b1aa48a127570ec03becbb29c9d5a9eb092febbd1699d567cae859/rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f", size = 415661, upload-time = "2025-11-30T20:23:40.263Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, + { url = "https://files.pythonhosted.org/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = "https://files.pythonhosted.org/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = "https://files.pythonhosted.org/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, + { url = "https://files.pythonhosted.org/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, + { url = "https://files.pythonhosted.org/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, + { url = "https://files.pythonhosted.org/packages/ee/ca/be7bca14cf21513bdf9c0606aba17d1f389ea2b6987035eb4f62bd923f25/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419", size = 408340, upload-time = "2025-11-30T20:24:00.2Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/c7/736e00ebf39ed81d75544c0da6ef7b0998f8201b369acf842f9a90dc8fce/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551", size = 383765, upload-time = "2025-11-30T20:24:01.759Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3f/da50dfde9956aaf365c4adc9533b100008ed31aea635f2b8d7b627e25b49/rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8", size = 396834, upload-time = "2025-11-30T20:24:03.687Z" }, + { url = "https://files.pythonhosted.org/packages/4e/00/34bcc2565b6020eab2623349efbdec810676ad571995911f1abdae62a3a0/rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5", size = 415470, upload-time = "2025-11-30T20:24:05.232Z" }, + { url = "https://files.pythonhosted.org/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, + { url = "https://files.pythonhosted.org/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, +] + +[[package]] +name = "ruff" +version = "0.14.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/5b/dd7406afa6c95e3d8fa9d652b6d6dd17dd4a6bf63cb477014e8ccd3dcd46/ruff-0.14.7.tar.gz", hash = "sha256:3417deb75d23bd14a722b57b0a1435561db65f0ad97435b4cf9f85ffcef34ae5", size = 5727324, upload-time = "2025-11-28T20:55:10.525Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/b1/7ea5647aaf90106f6d102230e5df874613da43d1089864da1553b899ba5e/ruff-0.14.7-py3-none-linux_armv6l.whl", hash = "sha256:b9d5cb5a176c7236892ad7224bc1e63902e4842c460a0b5210701b13e3de4fca", size = 13414475, upload-time = "2025-11-28T20:54:54.569Z" }, + { url = "https://files.pythonhosted.org/packages/af/19/fddb4cd532299db9cdaf0efdc20f5c573ce9952a11cb532d3b859d6d9871/ruff-0.14.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3f64fe375aefaf36ca7d7250292141e39b4cea8250427482ae779a2aa5d90015", size = 13634613, upload-time = "2025-11-28T20:55:17.54Z" }, + { url = "https://files.pythonhosted.org/packages/40/2b/469a66e821d4f3de0440676ed3e04b8e2a1dc7575cf6fa3ba6d55e3c8557/ruff-0.14.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:93e83bd3a9e1a3bda64cb771c0d47cda0e0d148165013ae2d3554d718632d554", size = 12765458, upload-time = "2025-11-28T20:55:26.128Z" }, + { url = 
"https://files.pythonhosted.org/packages/f1/05/0b001f734fe550bcfde4ce845948ac620ff908ab7241a39a1b39bb3c5f49/ruff-0.14.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3838948e3facc59a6070795de2ae16e5786861850f78d5914a03f12659e88f94", size = 13236412, upload-time = "2025-11-28T20:55:28.602Z" }, + { url = "https://files.pythonhosted.org/packages/11/36/8ed15d243f011b4e5da75cd56d6131c6766f55334d14ba31cce5461f28aa/ruff-0.14.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24c8487194d38b6d71cd0fd17a5b6715cda29f59baca1defe1e3a03240f851d1", size = 13182949, upload-time = "2025-11-28T20:55:33.265Z" }, + { url = "https://files.pythonhosted.org/packages/3b/cf/fcb0b5a195455729834f2a6eadfe2e4519d8ca08c74f6d2b564a4f18f553/ruff-0.14.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79c73db6833f058a4be8ffe4a0913b6d4ad41f6324745179bd2aa09275b01d0b", size = 13816470, upload-time = "2025-11-28T20:55:08.203Z" }, + { url = "https://files.pythonhosted.org/packages/7f/5d/34a4748577ff7a5ed2f2471456740f02e86d1568a18c9faccfc73bd9ca3f/ruff-0.14.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:12eb7014fccff10fc62d15c79d8a6be4d0c2d60fe3f8e4d169a0d2def75f5dad", size = 15289621, upload-time = "2025-11-28T20:55:30.837Z" }, + { url = "https://files.pythonhosted.org/packages/53/53/0a9385f047a858ba133d96f3f8e3c9c66a31cc7c4b445368ef88ebeac209/ruff-0.14.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c623bbdc902de7ff715a93fa3bb377a4e42dd696937bf95669118773dbf0c50", size = 14975817, upload-time = "2025-11-28T20:55:24.107Z" }, + { url = "https://files.pythonhosted.org/packages/a8/d7/2f1c32af54c3b46e7fadbf8006d8b9bcfbea535c316b0bd8813d6fb25e5d/ruff-0.14.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f53accc02ed2d200fa621593cdb3c1ae06aa9b2c3cae70bc96f72f0000ae97a9", size = 14284549, upload-time = "2025-11-28T20:55:06.08Z" }, + { url = 
"https://files.pythonhosted.org/packages/92/05/434ddd86becd64629c25fb6b4ce7637dd52a45cc4a4415a3008fe61c27b9/ruff-0.14.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:281f0e61a23fcdcffca210591f0f53aafaa15f9025b5b3f9706879aaa8683bc4", size = 14071389, upload-time = "2025-11-28T20:55:35.617Z" }, + { url = "https://files.pythonhosted.org/packages/ff/50/fdf89d4d80f7f9d4f420d26089a79b3bb1538fe44586b148451bc2ba8d9c/ruff-0.14.7-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:dbbaa5e14148965b91cb090236931182ee522a5fac9bc5575bafc5c07b9f9682", size = 14202679, upload-time = "2025-11-28T20:55:01.472Z" }, + { url = "https://files.pythonhosted.org/packages/77/54/87b34988984555425ce967f08a36df0ebd339bb5d9d0e92a47e41151eafc/ruff-0.14.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1464b6e54880c0fe2f2d6eaefb6db15373331414eddf89d6b903767ae2458143", size = 13147677, upload-time = "2025-11-28T20:55:19.933Z" }, + { url = "https://files.pythonhosted.org/packages/67/29/f55e4d44edfe053918a16a3299e758e1c18eef216b7a7092550d7a9ec51c/ruff-0.14.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f217ed871e4621ea6128460df57b19ce0580606c23aeab50f5de425d05226784", size = 13151392, upload-time = "2025-11-28T20:55:21.967Z" }, + { url = "https://files.pythonhosted.org/packages/36/69/47aae6dbd4f1d9b4f7085f4d9dcc84e04561ee7ad067bf52e0f9b02e3209/ruff-0.14.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6be02e849440ed3602d2eb478ff7ff07d53e3758f7948a2a598829660988619e", size = 13412230, upload-time = "2025-11-28T20:55:12.749Z" }, + { url = "https://files.pythonhosted.org/packages/b7/4b/6e96cb6ba297f2ba502a231cd732ed7c3de98b1a896671b932a5eefa3804/ruff-0.14.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19a0f116ee5e2b468dfe80c41c84e2bbd6b74f7b719bee86c2ecde0a34563bcc", size = 14195397, upload-time = "2025-11-28T20:54:56.896Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/82/251d5f1aa4dcad30aed491b4657cecd9fb4274214da6960ffec144c260f7/ruff-0.14.7-py3-none-win32.whl", hash = "sha256:e33052c9199b347c8937937163b9b149ef6ab2e4bb37b042e593da2e6f6cccfa", size = 13126751, upload-time = "2025-11-28T20:55:03.47Z" }, + { url = "https://files.pythonhosted.org/packages/a8/b5/d0b7d145963136b564806f6584647af45ab98946660d399ec4da79cae036/ruff-0.14.7-py3-none-win_amd64.whl", hash = "sha256:e17a20ad0d3fad47a326d773a042b924d3ac31c6ca6deb6c72e9e6b5f661a7c6", size = 14531726, upload-time = "2025-11-28T20:54:59.121Z" }, + { url = "https://files.pythonhosted.org/packages/1d/d2/1637f4360ada6a368d3265bf39f2cf737a0aaab15ab520fc005903e883f8/ruff-0.14.7-py3-none-win_arm64.whl", hash = "sha256:be4d653d3bea1b19742fcc6502354e32f65cd61ff2fbdb365803ef2c2aec6228", size = 13609215, upload-time = "2025-11-28T20:55:15.375Z" }, +] + +[[package]] +name = "secretstorage" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "jeepney" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1c/03/e834bcd866f2f8a49a85eaff47340affa3bfa391ee9912a952a1faa68c7b/secretstorage-3.5.0.tar.gz", hash = "sha256:f04b8e4689cbce351744d5537bf6b1329c6fc68f91fa666f60a380edddcd11be", size = 19884, upload-time = "2025-11-23T19:02:53.191Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/46/f5af3402b579fd5e11573ce652019a67074317e18c1935cc0b4ba9b35552/secretstorage-3.5.0-py3-none-any.whl", hash = "sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137", size = 15554, upload-time = "2025-11-23T19:02:51.545Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = 
"sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/db/3c/fa6517610dc641262b77cc7bf994ecd17465812c1b0585fe33e11be758ab/sse_starlette-3.0.3.tar.gz", hash = "sha256:88cfb08747e16200ea990c8ca876b03910a23b547ab3bd764c0d8eb81019b971", size = 21943, upload-time = "2025-10-30T18:44:20.117Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/a0/984525d19ca5c8a6c33911a0c164b11490dd0f90ff7fd689f704f84e9a11/sse_starlette-3.0.3-py3-none-any.whl", hash = "sha256:af5bf5a6f3933df1d9c7f8539633dc8444ca6a97ab2e2a7cd3b6e431ac03a431", size = 11765, upload-time = "2025-10-30T18:44:18.834Z" }, +] + +[[package]] +name = "starlette" +version = "0.50.0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, +] + +[[package]] +name = "syrupy" +version = "4.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/57/54/07f40c1e9355c0eb6909b83abd8ea2c523a1e05b8257a575bbaa42df28de/syrupy-4.8.0.tar.gz", hash = "sha256:648f0e9303aaa8387c8365d7314784c09a6bab0a407455c6a01d6a4f5c6a8ede", size = 49526, upload-time = "2024-11-23T23:34:36.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/c7/8cd6b5fa8cc4a5c025d3d36a014dff44436fda8b3003a471253931c77f1b/syrupy-4.8.0-py3-none-any.whl", hash = "sha256:544f4ec6306f4b1c460fdab48fd60b2c7fe54a6c0a8243aeea15f9ad9c638c3f", size = 49530, upload-time = "2024-11-23T23:34:34.697Z" }, +] + +[[package]] +name = "textual" +version = "6.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py", extra = ["linkify"] }, + { name = "mdit-py-plugins" }, + { name = "platformdirs" }, + { name = "pygments" }, + { name = "rich" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/00/9520327698acb6d8ae120b311ef1901840d55a6c41580e377f36261daf7a/textual-6.7.1.tar.gz", hash = 
"sha256:2a5acb0ab316a7ba9e74b0a291fab8933d681d7cf6f4e1eeb45c39a731b094cf", size = 1580916, upload-time = "2025-12-01T20:57:25.578Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/7a/7f3ea5e6f26d546ee4bd107df8fc9eef9f149dab0f6f15e1fc9f9413231f/textual-6.7.1-py3-none-any.whl", hash = "sha256:b92977ac5941dd37b6b7dc0ac021850ce8d9bf2e123c5bab7ff2016f215272e0", size = 713993, upload-time = "2025-12-01T20:57:23.698Z" }, +] + +[[package]] +name = "tomli-w" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/75/241269d1da26b624c0d5e110e8149093c759b7a286138f4efd61a60e75fe/tomli_w-1.2.0.tar.gz", hash = "sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021", size = 7184, upload-time = "2025-01-15T12:07:24.262Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90", size = 6675, upload-time = "2025-01-15T12:07:22.074Z" }, +] + +[[package]] +name = "twine" +version = "6.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "id" }, + { name = "keyring", marker = "platform_machine != 'ppc64le' and platform_machine != 's390x'" }, + { name = "packaging" }, + { name = "readme-renderer" }, + { name = "requests" }, + { name = "requests-toolbelt" }, + { name = "rfc3986" }, + { name = "rich" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e0/a8/949edebe3a82774c1ec34f637f5dd82d1cf22c25e963b7d63771083bbee5/twine-6.2.0.tar.gz", hash = "sha256:e5ed0d2fd70c9959770dce51c8f39c8945c574e18173a7b81802dab51b4b75cf", size = 172262, upload-time = "2025-09-04T15:43:17.255Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3a/7a/882d99539b19b1490cac5d77c67338d126e4122c8276bf640e411650c830/twine-6.2.0-py3-none-any.whl", hash = "sha256:418ebf08ccda9a8caaebe414433b0ba5e25eb5e4a927667122fbe8f829f985d8", size = 42727, upload-time = "2025-09-04T15:43:15.994Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "typos" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a2/f0/8d988732b10ef72ed82900b055590a210a5ae423b4088d17fa961305ed6b/typos-1.40.0.tar.gz", hash = "sha256:5cb1a04a6291fa1fa358ce6d8cd5b50e396d0a306466b792ac6c246066b1780f", size = 1765534, upload-time = "2025-11-26T20:54:53.792Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/dd/64ceee60d4d1d7d0c90dac8d3bb5ebfcc2e1d1e5b5166f3284abc4052e45/typos-1.40.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:71441cb06044baba29911e4b6500a85b2e915736d1fc0a54d5f575addb12a307", size = 3507274, upload-time = "2025-11-26T20:54:39.564Z" }, + { url = "https://files.pythonhosted.org/packages/18/db/64f7146b86e912041aafe275f627081e4bd005f71932f5280cf0c3944f2b/typos-1.40.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:269e411f342126b06f38936eba9d391a41442c17425e57068797c9e6997e3fca", size = 3391108, upload-time = "2025-11-26T20:54:41.462Z" }, + { url = "https://files.pythonhosted.org/packages/9d/f1/1eead106cc0c025319d23ccff78aa7b9c86a8a918f62359180f119deb96b/typos-1.40.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78d4d7be7e6f61c1bbec01abd9ee2e08254f633b845a9d2c5786051832c3e0c1", size = 8215390, upload-time = "2025-11-26T20:54:43.01Z" }, + { url = "https://files.pythonhosted.org/packages/82/c9/dc027ec8819d1c652d80ac2c3b6216dcc4c6d198907e2c2ed29cd4710685/typos-1.40.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4dbc419aed7cd4b9e8ec71a28045a3b6262fa5a41170734a3fc4dfdf1e7d7a51", size = 7192543, upload-time = "2025-11-26T20:54:44.616Z" }, + { url = "https://files.pythonhosted.org/packages/1a/db/f6fef0f4d173f501b469a90ed3d462bf7e4301a28507b7914cefa1d78ca1/typos-1.40.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0701400559effc6806a043dac55e1b77fc09e540661bf4315eaf55a628138214", size = 7729297, upload-time = "2025-11-26T20:54:46.122Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/a4/bb5b415cd352168550170ba5bb7c6b1c53fe457084df5ff07488c525dca6/typos-1.40.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:41ed67ad7cba724841f72d5c7c69de20f79dbee52917fb1fb5f3efa327d44cd3", size = 7107127, upload-time = "2025-11-26T20:54:47.974Z" }, + { url = "https://files.pythonhosted.org/packages/1d/92/1a39cea9ba7369555ed3f540b48ed5fd6f059ec89e24fb87dd21df69bf2a/typos-1.40.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:47764e89fca194b77ff65741b1527210096e39984b2c460ba5bc4868ea05ea88", size = 8141765, upload-time = "2025-11-26T20:54:49.461Z" }, + { url = "https://files.pythonhosted.org/packages/09/64/7d28b539b6d09b59ed3ea13f54e74e0cb8409fff3174928ee2f98ca349fb/typos-1.40.0-py3-none-win32.whl", hash = "sha256:9cd19efd5a3abcc788770ffb9a070f39da0d97c4aadd7eaf471e744a02002464", size = 3065525, upload-time = "2025-11-26T20:54:51.112Z" }, + { url = "https://files.pythonhosted.org/packages/49/0a/e324e17a0407dfe2459ecd8c467b0b3953ec5c553bd552949fdc238bec91/typos-1.40.0-py3-none-win_amd64.whl", hash = "sha256:69c47f0b899bc62d87d6fc431824348782e76dca1867115976915a197b0a1fd2", size = 3254935, upload-time = "2025-11-26T20:54:52.458Z" }, +] + +[[package]] +name = "uc-micro-py" +version = "1.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/ce/f06b84e2697fef4688ca63bdb2fdf113ca0a3be33f94488f2cadb690b0cf/uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d", size = 80605, upload-time = "2025-10-18T13:46:44.63Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/d9/d88e73ca598f4f6ff671fb5fde8a32925c2e08a637303a1d12883c7305fa/uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02", size = 68109, upload-time = "2025-10-18T13:46:42.958Z" }, +] + +[[package]] +name = "virtualenv" +version = "20.35.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/28/e6f1a6f655d620846bd9df527390ecc26b3805a0c5989048c210e22c5ca9/virtualenv-20.35.4.tar.gz", hash = "sha256:643d3914d73d3eeb0c552cbb12d7e82adf0e504dbf86a3182f8771a153a1971c", size = 6028799, upload-time = "2025-10-29T06:57:40.511Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" }, +] + +[[package]] +name = "vulture" +version = "2.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/25/925f35db758a0f9199113aaf61d703de891676b082bd7cf73ea01d6000f7/vulture-2.14.tar.gz", hash = "sha256:cb8277902a1138deeab796ec5bef7076a6e0248ca3607a3f3dee0b6d9e9b8415", size = 58823, upload-time = "2024-12-08T17:39:43.319Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/56/0cc15b8ff2613c1d5c3dc1f3f576ede1c43868c1bc2e5ccaa2d4bcd7974d/vulture-2.14-py2.py3-none-any.whl", hash = "sha256:d9a90dba89607489548a49d557f8bac8112bd25d3cbc8aeef23e860811bd5ed9", size = 28915, upload-time = "2024-12-08T17:39:40.573Z" }, +] + +[[package]] +name = "watchfiles" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c2/c9/8869df9b2a2d6c59d79220a4db37679e74f807c559ffe5265e08b227a210/watchfiles-1.1.1.tar.gz", hash = "sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2", size = 94440, upload-time = "2025-10-14T15:06:21.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/d5/f039e7e3c639d9b1d09b07ea412a6806d38123f0508e5f9b48a87b0a76cc/watchfiles-1.1.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8c89f9f2f740a6b7dcc753140dd5e1ab9215966f7a3530d0c0705c83b401bd7d", size = 404745, upload-time = "2025-10-14T15:04:46.731Z" }, + { url = "https://files.pythonhosted.org/packages/a5/96/a881a13aa1349827490dab2d363c8039527060cfcc2c92cc6d13d1b1049e/watchfiles-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:bd404be08018c37350f0d6e34676bd1e2889990117a2b90070b3007f172d0610", size = 391769, upload-time = "2025-10-14T15:04:48.003Z" }, + { url = "https://files.pythonhosted.org/packages/4b/5b/d3b460364aeb8da471c1989238ea0e56bec24b6042a68046adf3d9ddb01c/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8526e8f916bb5b9a0a777c8317c23ce65de259422bba5b31325a6fa6029d33af", size = 449374, upload-time = "2025-10-14T15:04:49.179Z" }, + { url = "https://files.pythonhosted.org/packages/b9/44/5769cb62d4ed055cb17417c0a109a92f007114a4e07f30812a73a4efdb11/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2edc3553362b1c38d9f06242416a5d8e9fe235c204a4072e988ce2e5bb1f69f6", size = 459485, upload-time = "2025-10-14T15:04:50.155Z" }, + { url = "https://files.pythonhosted.org/packages/19/0c/286b6301ded2eccd4ffd0041a1b726afda999926cf720aab63adb68a1e36/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30f7da3fb3f2844259cba4720c3fc7138eb0f7b659c38f3bfa65084c7fc7abce", size = 488813, upload-time = "2025-10-14T15:04:51.059Z" }, + { url = "https://files.pythonhosted.org/packages/c7/2b/8530ed41112dd4a22f4dcfdb5ccf6a1baad1ff6eed8dc5a5f09e7e8c41c7/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8979280bdafff686ba5e4d8f97840f929a87ed9cdf133cbbd42f7766774d2aa", size = 594816, upload-time = "2025-10-14T15:04:52.031Z" }, + { url = "https://files.pythonhosted.org/packages/ce/d2/f5f9fb49489f184f18470d4f99f4e862a4b3e9ac2865688eb2099e3d837a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcc5c24523771db3a294c77d94771abcfcb82a0e0ee8efd910c37c59ec1b31bb", size = 475186, upload-time = "2025-10-14T15:04:53.064Z" }, + { url = 
"https://files.pythonhosted.org/packages/cf/68/5707da262a119fb06fbe214d82dd1fe4a6f4af32d2d14de368d0349eb52a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db5d7ae38ff20153d542460752ff397fcf5c96090c1230803713cf3147a6803", size = 456812, upload-time = "2025-10-14T15:04:55.174Z" }, + { url = "https://files.pythonhosted.org/packages/66/ab/3cbb8756323e8f9b6f9acb9ef4ec26d42b2109bce830cc1f3468df20511d/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:28475ddbde92df1874b6c5c8aaeb24ad5be47a11f87cde5a28ef3835932e3e94", size = 630196, upload-time = "2025-10-14T15:04:56.22Z" }, + { url = "https://files.pythonhosted.org/packages/78/46/7152ec29b8335f80167928944a94955015a345440f524d2dfe63fc2f437b/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:36193ed342f5b9842edd3532729a2ad55c4160ffcfa3700e0d54be496b70dd43", size = 622657, upload-time = "2025-10-14T15:04:57.521Z" }, + { url = "https://files.pythonhosted.org/packages/0a/bf/95895e78dd75efe9a7f31733607f384b42eb5feb54bd2eb6ed57cc2e94f4/watchfiles-1.1.1-cp312-cp312-win32.whl", hash = "sha256:859e43a1951717cc8de7f4c77674a6d389b106361585951d9e69572823f311d9", size = 272042, upload-time = "2025-10-14T15:04:59.046Z" }, + { url = "https://files.pythonhosted.org/packages/87/0a/90eb755f568de2688cb220171c4191df932232c20946966c27a59c400850/watchfiles-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:91d4c9a823a8c987cce8fa2690923b069966dabb196dd8d137ea2cede885fde9", size = 288410, upload-time = "2025-10-14T15:05:00.081Z" }, + { url = "https://files.pythonhosted.org/packages/36/76/f322701530586922fbd6723c4f91ace21364924822a8772c549483abed13/watchfiles-1.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:a625815d4a2bdca61953dbba5a39d60164451ef34c88d751f6c368c3ea73d404", size = 278209, upload-time = "2025-10-14T15:05:01.168Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/f4/f750b29225fe77139f7ae5de89d4949f5a99f934c65a1f1c0b248f26f747/watchfiles-1.1.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:130e4876309e8686a5e37dba7d5e9bc77e6ed908266996ca26572437a5271e18", size = 404321, upload-time = "2025-10-14T15:05:02.063Z" }, + { url = "https://files.pythonhosted.org/packages/2b/f9/f07a295cde762644aa4c4bb0f88921d2d141af45e735b965fb2e87858328/watchfiles-1.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f3bde70f157f84ece3765b42b4a52c6ac1a50334903c6eaf765362f6ccca88a", size = 391783, upload-time = "2025-10-14T15:05:03.052Z" }, + { url = "https://files.pythonhosted.org/packages/bc/11/fc2502457e0bea39a5c958d86d2cb69e407a4d00b85735ca724bfa6e0d1a/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e0b1fe858430fc0251737ef3824c54027bedb8c37c38114488b8e131cf8219", size = 449279, upload-time = "2025-10-14T15:05:04.004Z" }, + { url = "https://files.pythonhosted.org/packages/e3/1f/d66bc15ea0b728df3ed96a539c777acfcad0eb78555ad9efcaa1274688f0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f27db948078f3823a6bb3b465180db8ebecf26dd5dae6f6180bd87383b6b4428", size = 459405, upload-time = "2025-10-14T15:05:04.942Z" }, + { url = "https://files.pythonhosted.org/packages/be/90/9f4a65c0aec3ccf032703e6db02d89a157462fbb2cf20dd415128251cac0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059098c3a429f62fc98e8ec62b982230ef2c8df68c79e826e37b895bc359a9c0", size = 488976, upload-time = "2025-10-14T15:05:05.905Z" }, + { url = "https://files.pythonhosted.org/packages/37/57/ee347af605d867f712be7029bb94c8c071732a4b44792e3176fa3c612d39/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfb5862016acc9b869bb57284e6cb35fdf8e22fe59f7548858e2f971d045f150", size = 595506, upload-time = "2025-10-14T15:05:06.906Z" }, + { url = 
"https://files.pythonhosted.org/packages/a8/78/cc5ab0b86c122047f75e8fc471c67a04dee395daf847d3e59381996c8707/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:319b27255aacd9923b8a276bb14d21a5f7ff82564c744235fc5eae58d95422ae", size = 474936, upload-time = "2025-10-14T15:05:07.906Z" }, + { url = "https://files.pythonhosted.org/packages/62/da/def65b170a3815af7bd40a3e7010bf6ab53089ef1b75d05dd5385b87cf08/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c755367e51db90e75b19454b680903631d41f9e3607fbd941d296a020c2d752d", size = 456147, upload-time = "2025-10-14T15:05:09.138Z" }, + { url = "https://files.pythonhosted.org/packages/57/99/da6573ba71166e82d288d4df0839128004c67d2778d3b566c138695f5c0b/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c22c776292a23bfc7237a98f791b9ad3144b02116ff10d820829ce62dff46d0b", size = 630007, upload-time = "2025-10-14T15:05:10.117Z" }, + { url = "https://files.pythonhosted.org/packages/a8/51/7439c4dd39511368849eb1e53279cd3454b4a4dbace80bab88feeb83c6b5/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3a476189be23c3686bc2f4321dd501cb329c0a0469e77b7b534ee10129ae6374", size = 622280, upload-time = "2025-10-14T15:05:11.146Z" }, + { url = "https://files.pythonhosted.org/packages/95/9c/8ed97d4bba5db6fdcdb2b298d3898f2dd5c20f6b73aee04eabe56c59677e/watchfiles-1.1.1-cp313-cp313-win32.whl", hash = "sha256:bf0a91bfb5574a2f7fc223cf95eeea79abfefa404bf1ea5e339c0c1560ae99a0", size = 272056, upload-time = "2025-10-14T15:05:12.156Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/c14e28429f744a260d8ceae18bf58c1d5fa56b50d006a7a9f80e1882cb0d/watchfiles-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:52e06553899e11e8074503c8e716d574adeeb7e68913115c4b3653c53f9bae42", size = 288162, upload-time = "2025-10-14T15:05:13.208Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/61/fe0e56c40d5cd29523e398d31153218718c5786b5e636d9ae8ae79453d27/watchfiles-1.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac3cc5759570cd02662b15fbcd9d917f7ecd47efe0d6b40474eafd246f91ea18", size = 277909, upload-time = "2025-10-14T15:05:14.49Z" }, + { url = "https://files.pythonhosted.org/packages/79/42/e0a7d749626f1e28c7108a99fb9bf524b501bbbeb9b261ceecde644d5a07/watchfiles-1.1.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:563b116874a9a7ce6f96f87cd0b94f7faf92d08d0021e837796f0a14318ef8da", size = 403389, upload-time = "2025-10-14T15:05:15.777Z" }, + { url = "https://files.pythonhosted.org/packages/15/49/08732f90ce0fbbc13913f9f215c689cfc9ced345fb1bcd8829a50007cc8d/watchfiles-1.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3ad9fe1dae4ab4212d8c91e80b832425e24f421703b5a42ef2e4a1e215aff051", size = 389964, upload-time = "2025-10-14T15:05:16.85Z" }, + { url = "https://files.pythonhosted.org/packages/27/0d/7c315d4bd5f2538910491a0393c56bf70d333d51bc5b34bee8e68e8cea19/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce70f96a46b894b36eba678f153f052967a0d06d5b5a19b336ab0dbbd029f73e", size = 448114, upload-time = "2025-10-14T15:05:17.876Z" }, + { url = "https://files.pythonhosted.org/packages/c3/24/9e096de47a4d11bc4df41e9d1e61776393eac4cb6eb11b3e23315b78b2cc/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cb467c999c2eff23a6417e58d75e5828716f42ed8289fe6b77a7e5a91036ca70", size = 460264, upload-time = "2025-10-14T15:05:18.962Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0f/e8dea6375f1d3ba5fcb0b3583e2b493e77379834c74fd5a22d66d85d6540/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:836398932192dae4146c8f6f737d74baeac8b70ce14831a239bdb1ca882fc261", size = 487877, upload-time = "2025-10-14T15:05:20.094Z" }, + { url = 
"https://files.pythonhosted.org/packages/ac/5b/df24cfc6424a12deb41503b64d42fbea6b8cb357ec62ca84a5a3476f654a/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:743185e7372b7bc7c389e1badcc606931a827112fbbd37f14c537320fca08620", size = 595176, upload-time = "2025-10-14T15:05:21.134Z" }, + { url = "https://files.pythonhosted.org/packages/8f/b5/853b6757f7347de4e9b37e8cc3289283fb983cba1ab4d2d7144694871d9c/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afaeff7696e0ad9f02cbb8f56365ff4686ab205fcf9c4c5b6fdfaaa16549dd04", size = 473577, upload-time = "2025-10-14T15:05:22.306Z" }, + { url = "https://files.pythonhosted.org/packages/e1/f7/0a4467be0a56e80447c8529c9fce5b38eab4f513cb3d9bf82e7392a5696b/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7eb7da0eb23aa2ba036d4f616d46906013a68caf61b7fdbe42fc8b25132e77", size = 455425, upload-time = "2025-10-14T15:05:23.348Z" }, + { url = "https://files.pythonhosted.org/packages/8e/e0/82583485ea00137ddf69bc84a2db88bd92ab4a6e3c405e5fb878ead8d0e7/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:831a62658609f0e5c64178211c942ace999517f5770fe9436be4c2faeba0c0ef", size = 628826, upload-time = "2025-10-14T15:05:24.398Z" }, + { url = "https://files.pythonhosted.org/packages/28/9a/a785356fccf9fae84c0cc90570f11702ae9571036fb25932f1242c82191c/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:f9a2ae5c91cecc9edd47e041a930490c31c3afb1f5e6d71de3dc671bfaca02bf", size = 622208, upload-time = "2025-10-14T15:05:25.45Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f4/0872229324ef69b2c3edec35e84bd57a1289e7d3fe74588048ed8947a323/watchfiles-1.1.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:d1715143123baeeaeadec0528bb7441103979a1d5f6fd0e1f915383fea7ea6d5", size = 404315, upload-time = "2025-10-14T15:05:26.501Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/22/16d5331eaed1cb107b873f6ae1b69e9ced582fcf0c59a50cd84f403b1c32/watchfiles-1.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:39574d6370c4579d7f5d0ad940ce5b20db0e4117444e39b6d8f99db5676c52fd", size = 390869, upload-time = "2025-10-14T15:05:27.649Z" }, + { url = "https://files.pythonhosted.org/packages/b2/7e/5643bfff5acb6539b18483128fdc0ef2cccc94a5b8fbda130c823e8ed636/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7365b92c2e69ee952902e8f70f3ba6360d0d596d9299d55d7d386df84b6941fb", size = 449919, upload-time = "2025-10-14T15:05:28.701Z" }, + { url = "https://files.pythonhosted.org/packages/51/2e/c410993ba5025a9f9357c376f48976ef0e1b1aefb73b97a5ae01a5972755/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfff9740c69c0e4ed32416f013f3c45e2ae42ccedd1167ef2d805c000b6c71a5", size = 460845, upload-time = "2025-10-14T15:05:30.064Z" }, + { url = "https://files.pythonhosted.org/packages/8e/a4/2df3b404469122e8680f0fcd06079317e48db58a2da2950fb45020947734/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b27cf2eb1dda37b2089e3907d8ea92922b673c0c427886d4edc6b94d8dfe5db3", size = 489027, upload-time = "2025-10-14T15:05:31.064Z" }, + { url = "https://files.pythonhosted.org/packages/ea/84/4587ba5b1f267167ee715b7f66e6382cca6938e0a4b870adad93e44747e6/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:526e86aced14a65a5b0ec50827c745597c782ff46b571dbfe46192ab9e0b3c33", size = 595615, upload-time = "2025-10-14T15:05:32.074Z" }, + { url = "https://files.pythonhosted.org/packages/6a/0f/c6988c91d06e93cd0bb3d4a808bcf32375ca1904609835c3031799e3ecae/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04e78dd0b6352db95507fd8cb46f39d185cf8c74e4cf1e4fbad1d3df96faf510", size = 474836, upload-time = "2025-10-14T15:05:33.209Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/36/ded8aebea91919485b7bbabbd14f5f359326cb5ec218cd67074d1e426d74/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c85794a4cfa094714fb9c08d4a218375b2b95b8ed1666e8677c349906246c05", size = 455099, upload-time = "2025-10-14T15:05:34.189Z" }, + { url = "https://files.pythonhosted.org/packages/98/e0/8c9bdba88af756a2fce230dd365fab2baf927ba42cd47521ee7498fd5211/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:74d5012b7630714b66be7b7b7a78855ef7ad58e8650c73afc4c076a1f480a8d6", size = 630626, upload-time = "2025-10-14T15:05:35.216Z" }, + { url = "https://files.pythonhosted.org/packages/2a/84/a95db05354bf2d19e438520d92a8ca475e578c647f78f53197f5a2f17aaf/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:8fbe85cb3201c7d380d3d0b90e63d520f15d6afe217165d7f98c9c649654db81", size = 622519, upload-time = "2025-10-14T15:05:36.259Z" }, + { url = "https://files.pythonhosted.org/packages/1d/ce/d8acdc8de545de995c339be67711e474c77d643555a9bb74a9334252bd55/watchfiles-1.1.1-cp314-cp314-win32.whl", hash = "sha256:3fa0b59c92278b5a7800d3ee7733da9d096d4aabcfabb9a928918bd276ef9b9b", size = 272078, upload-time = "2025-10-14T15:05:37.63Z" }, + { url = "https://files.pythonhosted.org/packages/c4/c9/a74487f72d0451524be827e8edec251da0cc1fcf111646a511ae752e1a3d/watchfiles-1.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:c2047d0b6cea13b3316bdbafbfa0c4228ae593d995030fda39089d36e64fc03a", size = 287664, upload-time = "2025-10-14T15:05:38.95Z" }, + { url = "https://files.pythonhosted.org/packages/df/b8/8ac000702cdd496cdce998c6f4ee0ca1f15977bba51bdf07d872ebdfc34c/watchfiles-1.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:842178b126593addc05acf6fce960d28bc5fae7afbaa2c6c1b3a7b9460e5be02", size = 277154, upload-time = "2025-10-14T15:05:39.954Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/a8/e3af2184707c29f0f14b1963c0aace6529f9d1b8582d5b99f31bbf42f59e/watchfiles-1.1.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:88863fbbc1a7312972f1c511f202eb30866370ebb8493aef2812b9ff28156a21", size = 403820, upload-time = "2025-10-14T15:05:40.932Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/e47e307c2f4bd75f9f9e8afbe3876679b18e1bcec449beca132a1c5ffb2d/watchfiles-1.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:55c7475190662e202c08c6c0f4d9e345a29367438cf8e8037f3155e10a88d5a5", size = 390510, upload-time = "2025-10-14T15:05:41.945Z" }, + { url = "https://files.pythonhosted.org/packages/d5/a0/ad235642118090f66e7b2f18fd5c42082418404a79205cdfca50b6309c13/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f53fa183d53a1d7a8852277c92b967ae99c2d4dcee2bfacff8868e6e30b15f7", size = 448408, upload-time = "2025-10-14T15:05:43.385Z" }, + { url = "https://files.pythonhosted.org/packages/df/85/97fa10fd5ff3332ae17e7e40e20784e419e28521549780869f1413742e9d/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6aae418a8b323732fa89721d86f39ec8f092fc2af67f4217a2b07fd3e93c6101", size = 458968, upload-time = "2025-10-14T15:05:44.404Z" }, + { url = "https://files.pythonhosted.org/packages/47/c2/9059c2e8966ea5ce678166617a7f75ecba6164375f3b288e50a40dc6d489/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f096076119da54a6080e8920cbdaac3dbee667eb91dcc5e5b78840b87415bd44", size = 488096, upload-time = "2025-10-14T15:05:45.398Z" }, + { url = "https://files.pythonhosted.org/packages/94/44/d90a9ec8ac309bc26db808a13e7bfc0e4e78b6fc051078a554e132e80160/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00485f441d183717038ed2e887a7c868154f216877653121068107b227a2f64c", size = 596040, upload-time = "2025-10-14T15:05:46.502Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/68/4e3479b20ca305cfc561db3ed207a8a1c745ee32bf24f2026a129d0ddb6e/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a55f3e9e493158d7bfdb60a1165035f1cf7d320914e7b7ea83fe22c6023b58fc", size = 473847, upload-time = "2025-10-14T15:05:47.484Z" }, + { url = "https://files.pythonhosted.org/packages/4f/55/2af26693fd15165c4ff7857e38330e1b61ab8c37d15dc79118cdba115b7a/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c91ed27800188c2ae96d16e3149f199d62f86c7af5f5f4d2c61a3ed8cd3666c", size = 455072, upload-time = "2025-10-14T15:05:48.928Z" }, + { url = "https://files.pythonhosted.org/packages/66/1d/d0d200b10c9311ec25d2273f8aad8c3ef7cc7ea11808022501811208a750/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:311ff15a0bae3714ffb603e6ba6dbfba4065ab60865d15a6ec544133bdb21099", size = 629104, upload-time = "2025-10-14T15:05:49.908Z" }, + { url = "https://files.pythonhosted.org/packages/e3/bd/fa9bb053192491b3867ba07d2343d9f2252e00811567d30ae8d0f78136fe/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:a916a2932da8f8ab582f242c065f5c81bed3462849ca79ee357dd9551b0e9b01", size = 622112, upload-time = "2025-10-14T15:05:50.941Z" }, +] diff --git a/vibe-acp.spec b/vibe-acp.spec new file mode 100644 index 0000000..9118f81 --- /dev/null +++ b/vibe-acp.spec @@ -0,0 +1,45 @@ +# -*- mode: python ; coding: utf-8 -*- + + +a = Analysis( + ['vibe/acp/entrypoint.py'], + pathex=[], + binaries=[], + datas=[ + # By default, pyinstaller doesn't include the .md files + ('vibe/core/prompts/*.md', 'vibe/core/prompts'), + ('vibe/core/tools/builtins/prompts/*.md', 'vibe/core/tools/builtins/prompts'), + # This is necessary because tools are dynamically called in vibe, meaning there is no static reference to those files + ('vibe/core/tools/builtins/*.py', 'vibe/core/tools/builtins'), + ('vibe/acp/tools/builtins/*.py', 
'vibe/acp/tools/builtins'), + ], + hiddenimports=[], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.datas, + [], + name='vibe-acp', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) diff --git a/vibe/__init__.py b/vibe/__init__.py new file mode 100644 index 0000000..9643191 --- /dev/null +++ b/vibe/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from pathlib import Path + +VIBE_ROOT = Path(__file__).parent diff --git a/vibe/acp/__init__.py b/vibe/acp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/acp/acp_agent.py b/vibe/acp/acp_agent.py new file mode 100644 index 0000000..b8c610d --- /dev/null +++ b/vibe/acp/acp_agent.py @@ -0,0 +1,441 @@ +from __future__ import annotations + +import asyncio +from collections.abc import AsyncGenerator +from pathlib import Path +import sys +from typing import Any, cast, override + +from acp import ( + PROTOCOL_VERSION, + Agent as AcpAgent, + AgentSideConnection, + AuthenticateRequest, + CancelNotification, + InitializeRequest, + InitializeResponse, + LoadSessionRequest, + NewSessionRequest, + NewSessionResponse, + PromptRequest, + PromptResponse, + RequestError, + RequestPermissionRequest, + SessionNotification, + SetSessionModelRequest, + SetSessionModelResponse, + SetSessionModeRequest, + SetSessionModeResponse, + stdio_streams, +) +from acp.helpers import ContentBlock, SessionUpdate +from acp.schema import ( + AgentCapabilities, + AgentMessageChunk, + AllowedOutcome, + AuthenticateResponse, + AuthMethod, + Implementation, + ModelInfo, + PromptCapabilities, + SessionModelState, + SessionModeState, + TextContentBlock, + TextResourceContents, + 
ToolCall, +) +from pydantic import BaseModel, ConfigDict + +from vibe import VIBE_ROOT +from vibe.acp.tools.base import BaseAcpTool +from vibe.acp.tools.session_update import ( + tool_call_session_update, + tool_result_session_update, +) +from vibe.acp.utils import TOOL_OPTIONS, ToolOption, VibeSessionMode +from vibe.core import __version__ +from vibe.core.agent import Agent as VibeAgent +from vibe.core.autocompletion.path_prompt_adapter import render_path_prompt +from vibe.core.config import MissingAPIKeyError, VibeConfig, load_api_keys_from_env +from vibe.core.types import ( + AssistantEvent, + AsyncApprovalCallback, + ToolCallEvent, + ToolResultEvent, +) +from vibe.core.utils import CancellationReason, get_user_cancellation_message + + +class AcpSession(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + id: str + agent: VibeAgent + mode_id: VibeSessionMode = VibeSessionMode.APPROVAL_REQUIRED + task: asyncio.Task[None] | None = None + + +class VibeAcpAgent(AcpAgent): + def __init__(self, connection: AgentSideConnection) -> None: + self.sessions: dict[str, AcpSession] = {} + self.connection = connection + self.client_capabilities = None + + @override + async def initialize(self, params: InitializeRequest) -> InitializeResponse: + self.client_capabilities = params.clientCapabilities + + # The ACP Agent process can be launched in 3 different ways, depending on installation + # - dev mode: `uv run vibe-acp`, ran from the project root + # - uv tool install: `vibe-acp`, similar to dev mode, but uv takes care of path resolution + # - bundled binary: `./vibe-acp` from binary location + # The 2 first modes are working similarly, under the hood uv runs `/some/python /my/entrypoint.py`` + # The last mode is quite different as our bundler also includes the python install. + # So sys.executable is already /path/to/binary/vibe-acp. 
+ # For this reason, we make a distinction in the way we call the setup command + command = sys.executable + if "python" not in Path(command).name: + # It's the case for bundled binaries, we don't need any other arguments + args = ["--setup"] + else: + script_name = sys.argv[0] + args = [script_name, "--setup"] + + auth_methods = [ + AuthMethod( + id="vibe-setup", + name="Register your API Key", + description="Register your API Key inside Mistral Vibe", + field_meta={ + "terminal-auth": { + "command": command, + "args": args, + "label": "Mistral Vibe Setup", + } + }, + ) + ] + + response = InitializeResponse( + agentCapabilities=AgentCapabilities( + loadSession=False, + promptCapabilities=PromptCapabilities( + audio=False, embeddedContext=True, image=False + ), + ), + protocolVersion=PROTOCOL_VERSION, + agentInfo=Implementation( + name="@mistralai/mistral-vibe", + title="Mistral Vibe", + version=__version__, + ), + authMethods=auth_methods, + ) + return response + + @override + async def authenticate( + self, params: AuthenticateRequest + ) -> AuthenticateResponse | None: + raise NotImplementedError("Not implemented yet") + + @override + async def newSession(self, params: NewSessionRequest) -> NewSessionResponse: + capability_disabled_tools = self._get_disabled_tools_from_capabilities() + load_api_keys_from_env() + try: + config = VibeConfig.load( + workdir=Path(params.cwd), + tool_paths=[str(VIBE_ROOT / "acp" / "tools" / "builtins")], + disabled_tools=capability_disabled_tools, + ) + except MissingAPIKeyError as e: + raise RequestError.auth_required({ + "message": "You must be authenticated before creating a new session" + }) from e + + agent = VibeAgent(config=config, auto_approve=False, enable_streaming=True) + # NOTE: For now, we pin session.id to agent.session_id right after init time. + # We should just use agent.session_id everywhere, but it can still change during + # session lifetime (e.g. agent.compact is called). 
+ # We should refactor agent.session_id to make it immutable in ACP context. + session = AcpSession(id=agent.session_id, agent=agent) + self.sessions[session.id] = session + + if not agent.auto_approve: + agent.set_approval_callback( + self._create_approval_callback(agent.session_id) + ) + + response = NewSessionResponse( + sessionId=agent.session_id, + models=SessionModelState( + currentModelId=agent.config.active_model, + availableModels=[ + ModelInfo(modelId=model.alias, name=model.alias) + for model in agent.config.models + ], + ), + modes=SessionModeState( + currentModeId=session.mode_id, + availableModes=VibeSessionMode.get_all_acp_session_modes(), + ), + ) + return response + + def _get_disabled_tools_from_capabilities(self) -> list[str]: + if not self.client_capabilities: + return [] + + disabled: list[str] = [] + + if not self.client_capabilities.terminal: + disabled.append("bash") + + if fs := self.client_capabilities.fs: + if not fs.readTextFile: + disabled.append("read_file") + if not fs.writeTextFile: + disabled.append("write_file") + disabled.append("search_replace") + + return disabled + + def _create_approval_callback(self, session_id: str) -> AsyncApprovalCallback: + + async def approval_callback( + tool_name: str, args: dict[str, Any], tool_call_id: str + ) -> tuple[str, str | None]: + # Create the tool call update + tool_call = ToolCall(toolCallId=tool_call_id) + + # Request permission from the user + request = RequestPermissionRequest( + sessionId=session_id, toolCall=tool_call, options=TOOL_OPTIONS + ) + + response = await self.connection.requestPermission(request) + + # Parse the response using isinstance for proper type narrowing + if response.outcome.outcome == "selected": + outcome = cast(AllowedOutcome, response.outcome) + return self._handle_permission_selection(outcome.optionId) + else: + return ( + "n", + str( + get_user_cancellation_message( + CancellationReason.OPERATION_CANCELLED + ) + ), + ) + + return approval_callback + + 
@staticmethod + def _handle_permission_selection(option_id: str) -> tuple[str, str | None]: + match option_id: + case ToolOption.ALLOW_ONCE: + return ("y", None) + case ToolOption.ALLOW_ALWAYS: + return ("a", None) + case ToolOption.REJECT_ONCE: + return ("n", "User rejected the tool call, provide an alternative plan") + case _: + return ("n", f"Unknown option: {option_id}") + + def _get_session(self, session_id: str) -> AcpSession: + if session_id not in self.sessions: + raise RequestError.invalid_params({"session": "Not found"}) + return self.sessions[session_id] + + @override + async def loadSession(self, params: LoadSessionRequest) -> None: + raise NotImplementedError() + + @override + async def setSessionMode( + self, params: SetSessionModeRequest + ) -> SetSessionModeResponse | None: + session = self._get_session(params.sessionId) + + if not VibeSessionMode.is_valid(params.modeId): + return None + + session.mode_id = VibeSessionMode(params.modeId) + session.agent.auto_approve = params.modeId == VibeSessionMode.AUTO_APPROVE + + return SetSessionModeResponse() + + @override + async def setSessionModel( + self, params: SetSessionModelRequest + ) -> SetSessionModelResponse | None: + session = self._get_session(params.sessionId) + + model_aliases = [model.alias for model in session.agent.config.models] + if params.modelId not in model_aliases: + return None + + VibeConfig.save_updates({"active_model": params.modelId}) + + new_config = VibeConfig.load( + workdir=session.agent.config.workdir, + tool_paths=session.agent.config.tool_paths, + disabled_tools=self._get_disabled_tools_from_capabilities(), + ) + + await session.agent.reload_with_initial_messages(config=new_config) + + return SetSessionModelResponse() + + @override + async def prompt(self, params: PromptRequest) -> PromptResponse: + session = self._get_session(params.sessionId) + + if session.task is not None: + raise RuntimeError( + "Concurrent prompts are not supported yet, wait for agent to finish" + ) + 
+ text_prompt = self._build_text_prompt(params.prompt) + + async def agent_task() -> None: + async for update in self._run_agent_loop(session, text_prompt): + await self.connection.sessionUpdate( + SessionNotification(sessionId=session.id, update=update) + ) + + try: + session.task = asyncio.create_task(agent_task()) + await session.task + + except asyncio.CancelledError: + return PromptResponse(stopReason="cancelled") + + except Exception as e: + await self.connection.sessionUpdate( + SessionNotification( + sessionId=params.sessionId, + update=AgentMessageChunk( + sessionUpdate="agent_message_chunk", + content=TextContentBlock(type="text", text=f"Error: {e!s}"), + ), + ) + ) + + return PromptResponse(stopReason="refusal") + + finally: + session.task = None + + return PromptResponse(stopReason="end_turn") + + def _build_text_prompt(self, acp_prompt: list[ContentBlock]) -> str: + text_prompt = "" + for block in acp_prompt: + separator = "\n\n" if text_prompt else "" + match block.type: + # NOTE: ACP supports annotations, but we don't use them here yet. + case "text": + text_prompt = f"{text_prompt}{separator}{block.text}" + case "resource": + block_content = ( + block.resource.text + if isinstance(block.resource, TextResourceContents) + else block.resource.blob + ) + fields = {"path": block.resource.uri, "content": block_content} + parts = [ + f"{k}: {v}" + for k, v in fields.items() + if v is not None and (v or isinstance(v, (int, float))) + ] + block_prompt = "\n".join(parts) + text_prompt = f"{text_prompt}{separator}{block_prompt}" + case "resource_link": + # NOTE: we currently keep more information than just the URI + # making it more detailed than the output of the read_file tool. + # This is OK, but might be worth testing how it affect performance. 
+ fields = { + "uri": block.uri, + "name": block.name, + "title": block.title, + "description": block.description, + "mimeType": block.mimeType, + "size": block.size, + } + parts = [ + f"{k}: {v}" + for k, v in fields.items() + if v is not None and (v or isinstance(v, (int, float))) + ] + block_prompt = "\n".join(parts) + text_prompt = f"{text_prompt}{separator}{block_prompt}" + case _: + raise ValueError(f"Unsupported content block type: {block.type}") + return text_prompt + + async def _run_agent_loop( + self, session: AcpSession, prompt: str + ) -> AsyncGenerator[SessionUpdate]: + rendered_prompt = render_path_prompt( + prompt, base_dir=session.agent.config.effective_workdir + ) + async for event in session.agent.act(rendered_prompt): + if isinstance(event, AssistantEvent): + yield AgentMessageChunk( + sessionUpdate="agent_message_chunk", + content=TextContentBlock(type="text", text=event.content), + ) + + elif isinstance(event, ToolCallEvent): + if issubclass(event.tool_class, BaseAcpTool): + event.tool_class.update_tool_state( + tool_manager=session.agent.tool_manager, + connection=self.connection, + session_id=session.id, + tool_call_id=event.tool_call_id, + ) + + session_update = tool_call_session_update(event) + if session_update: + yield session_update + + elif isinstance(event, ToolResultEvent): + session_update = tool_result_session_update(event) + if session_update: + yield session_update + + @override + async def cancel(self, params: CancelNotification) -> None: + session = self._get_session(params.sessionId) + if session.task and not session.task.done(): + session.task.cancel() + session.task = None + + @override + async def extMethod(self, method: str, params: dict) -> dict: + raise NotImplementedError() + + @override + async def extNotification(self, method: str, params: dict) -> None: + raise NotImplementedError() + + +async def _run_acp_server() -> None: + reader, writer = await stdio_streams() + + AgentSideConnection(lambda connection: 
VibeAcpAgent(connection), writer, reader) + await asyncio.Event().wait() + + +def run_acp_server() -> None: + try: + asyncio.run(_run_acp_server()) + except KeyboardInterrupt: + # This is expected when the server is terminated + pass + except Exception as e: + # Log any unexpected errors + print(f"ACP Agent Server error: {e}", file=sys.stderr) + raise diff --git a/vibe/acp/entrypoint.py b/vibe/acp/entrypoint.py new file mode 100644 index 0000000..d2db69b --- /dev/null +++ b/vibe/acp/entrypoint.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +import argparse +from dataclasses import dataclass +import sys + +from vibe.acp.acp_agent import run_acp_server +from vibe.setup.onboarding import run_onboarding + +# Configure line buffering for subprocess communication +sys.stdout.reconfigure(line_buffering=True) # pyright: ignore[reportAttributeAccessIssue] +sys.stderr.reconfigure(line_buffering=True) # pyright: ignore[reportAttributeAccessIssue] +sys.stdin.reconfigure(line_buffering=True) # pyright: ignore[reportAttributeAccessIssue] + + +@dataclass +class Arguments: + setup: bool + + +def parse_arguments() -> Arguments: + parser = argparse.ArgumentParser(description="Run Mistral Vibe in ACP mode") + parser.add_argument("--setup", action="store_true", help="Setup API key and exit") + args = parser.parse_args() + return Arguments(setup=args.setup) + + +def main() -> None: + args = parse_arguments() + if args.setup: + run_onboarding() + sys.exit(0) + run_acp_server() + + +if __name__ == "__main__": + main() diff --git a/vibe/acp/tools/__init__.py b/vibe/acp/tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/acp/tools/base.py b/vibe/acp/tools/base.py new file mode 100644 index 0000000..f2b7364 --- /dev/null +++ b/vibe/acp/tools/base.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +from abc import abstractmethod +from typing import Protocol, cast, runtime_checkable + +from acp import AgentSideConnection, SessionNotification 
class AcpToolState:
    """Mixin carrying the per-call ACP context a tool needs to talk to the client.

    NOTE(review): this is a plain class (not a pydantic model) whose defaults
    are pydantic ``Field`` objects. It is only ever mixed into pydantic state
    models (e.g. ``AcpBashState``), which is presumably what turns these
    annotations into real model fields — confirm pydantic picks up fields
    declared on non-model mixin bases.
    """

    # Live agent-side connection; None outside an active ACP session.
    connection: AgentSideConnection | None = Field(
        default=None, description="ACP agent-side connection"
    )
    # ID of the ACP session the current tool call belongs to.
    session_id: str | None = Field(default=None, description="Current ACP session ID")
    # ID of the in-flight tool call, used to address progress updates.
    tool_call_id: str | None = Field(
        default=None, description="Current ACP tool call ID"
    )
+ ) + if self.state.session_id is None: + raise ToolError( + "Session ID not available in tool state. This tool can only be used within an ACP session." + ) + + return self.state.connection, self.state.session_id, self.state.tool_call_id + + async def _send_in_progress_session_update( + self, content: list[ToolCallContentVariant] | None = None + ) -> None: + connection, session_id, tool_call_id = self._load_state() + if tool_call_id is None: + return + + try: + await connection.sessionUpdate( + SessionNotification( + sessionId=session_id, + update=ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=tool_call_id, + status="in_progress", + content=content, + ), + ) + ) + except Exception as e: + logger.error(f"Failed to update session: {e!r}") diff --git a/vibe/acp/tools/builtins/bash.py b/vibe/acp/tools/builtins/bash.py new file mode 100644 index 0000000..0d41cd7 --- /dev/null +++ b/vibe/acp/tools/builtins/bash.py @@ -0,0 +1,144 @@ +from __future__ import annotations + +import asyncio +import shlex + +from acp import CreateTerminalRequest, TerminalHandle +from acp.schema import ( + EnvVariable, + TerminalToolCallContent, + ToolCallProgress, + ToolCallStart, + WaitForTerminalExitResponse, +) + +from vibe import VIBE_ROOT +from vibe.acp.tools.base import AcpToolState, BaseAcpTool +from vibe.core.tools.base import BaseToolState, ToolError +from vibe.core.tools.builtins.bash import Bash as CoreBashTool, BashArgs, BashResult +from vibe.core.types import ToolCallEvent, ToolResultEvent +from vibe.core.utils import logger + + +class AcpBashState(BaseToolState, AcpToolState): + pass + + +class Bash(CoreBashTool, BaseAcpTool[AcpBashState]): + prompt_path = VIBE_ROOT / "core" / "tools" / "builtins" / "prompts" / "bash.md" + state: AcpBashState + + @classmethod + def _get_tool_state_class(cls) -> type[AcpBashState]: + return AcpBashState + + async def run(self, args: BashArgs) -> BashResult: + connection, session_id, _ = self._load_state() + + timeout = 
args.timeout or self.config.default_timeout + max_bytes = self.config.max_output_bytes + env, command, cmd_args = self._parse_command(args.command) + + create_request = CreateTerminalRequest( + sessionId=session_id, + command=command, + args=cmd_args, + env=env, + cwd=str(self.config.effective_workdir), + outputByteLimit=max_bytes, + ) + + try: + terminal_handle = await connection.createTerminal(create_request) + except Exception as e: + raise ToolError(f"Failed to create terminal: {e!r}") from e + + await self._send_in_progress_session_update([ + TerminalToolCallContent(type="terminal", terminalId=terminal_handle.id) + ]) + + try: + exit_response = await self._wait_for_terminal_exit( + terminal_handle, timeout, args.command + ) + + output_response = await terminal_handle.current_output() + + return self._build_result( + command=args.command, + stdout=output_response.output, + stderr="", + returncode=exit_response.exitCode or 0, + ) + + finally: + try: + await terminal_handle.release() + except Exception as e: + logger.error(f"Failed to release terminal: {e!r}") + + def _parse_command( + self, command_str: str + ) -> tuple[list[EnvVariable], str, list[str]]: + parts = shlex.split(command_str) + env: list[EnvVariable] = [] + command: str = "" + args: list[str] = [] + + for part in parts: + if "=" in part and not command: + key, value = part.split("=", 1) + env.append(EnvVariable(name=key, value=value)) + elif not command: + command = part + else: + args.append(part) + + return env, command, args + + @classmethod + def get_summary(cls, args: BashArgs) -> str: + summary = f"{args.command}" + if args.timeout: + summary += f" (timeout {args.timeout}s)" + + return summary + + async def _wait_for_terminal_exit( + self, terminal_handle: TerminalHandle, timeout: int, command: str + ) -> WaitForTerminalExitResponse: + try: + return await asyncio.wait_for( + terminal_handle.wait_for_exit(), timeout=timeout + ) + except TimeoutError: + try: + await terminal_handle.kill() + 
except Exception as e: + logger.error(f"Failed to kill terminal: {e!r}") + + raise self._build_timeout_error(command, timeout) + + @classmethod + def tool_call_session_update(cls, event: ToolCallEvent) -> ToolCallStart: + if not isinstance(event.args, BashArgs): + raise ValueError(f"Unexpected tool args: {event.args}") + + return ToolCallStart( + sessionUpdate="tool_call", + title=Bash.get_summary(event.args), + content=None, + toolCallId=event.tool_call_id, + kind="execute", + rawInput=event.args.model_dump_json(), + ) + + @classmethod + def tool_result_session_update( + cls, event: ToolResultEvent + ) -> ToolCallProgress | None: + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status="failed" if event.error else "completed", + ) diff --git a/vibe/acp/tools/builtins/read_file.py b/vibe/acp/tools/builtins/read_file.py new file mode 100644 index 0000000..137513c --- /dev/null +++ b/vibe/acp/tools/builtins/read_file.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from pathlib import Path + +from acp import ReadTextFileRequest + +from vibe import VIBE_ROOT +from vibe.acp.tools.base import AcpToolState, BaseAcpTool +from vibe.core.tools.base import ToolError +from vibe.core.tools.builtins.read_file import ( + ReadFile as CoreReadFileTool, + ReadFileArgs, + ReadFileResult, + ReadFileState, + _ReadResult, +) + +ReadFileResult = ReadFileResult + + +class AcpReadFileState(ReadFileState, AcpToolState): + pass + + +class ReadFile(CoreReadFileTool, BaseAcpTool[AcpReadFileState]): + state: AcpReadFileState + prompt_path = VIBE_ROOT / "core" / "tools" / "builtins" / "prompts" / "read_file.md" + + @classmethod + def _get_tool_state_class(cls) -> type[AcpReadFileState]: + return AcpReadFileState + + async def _read_file(self, args: ReadFileArgs, file_path: Path) -> _ReadResult: + connection, session_id, _ = self._load_state() + + line = args.offset + 1 if args.offset > 0 else None + limit = args.limit + + read_request = 
ReadTextFileRequest( + sessionId=session_id, path=str(file_path), line=line, limit=limit + ) + + await self._send_in_progress_session_update() + + try: + response = await connection.readTextFile(read_request) + except Exception as e: + raise ToolError(f"Error reading {file_path}: {e}") from e + + content_lines = response.content.splitlines(keepends=True) + lines_read = len(content_lines) + bytes_read = sum(len(line.encode("utf-8")) for line in content_lines) + + was_truncated = args.limit is not None and lines_read >= args.limit + + return _ReadResult( + lines=content_lines, bytes_read=bytes_read, was_truncated=was_truncated + ) diff --git a/vibe/acp/tools/builtins/search_replace.py b/vibe/acp/tools/builtins/search_replace.py new file mode 100644 index 0000000..d363eef --- /dev/null +++ b/vibe/acp/tools/builtins/search_replace.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +from pathlib import Path + +from acp import ReadTextFileRequest, WriteTextFileRequest +from acp.helpers import SessionUpdate +from acp.schema import ( + FileEditToolCallContent, + ToolCallLocation, + ToolCallProgress, + ToolCallStart, +) + +from vibe import VIBE_ROOT +from vibe.acp.tools.base import AcpToolState, BaseAcpTool +from vibe.core.tools.base import ToolError +from vibe.core.tools.builtins.search_replace import ( + SearchReplace as CoreSearchReplaceTool, + SearchReplaceArgs, + SearchReplaceResult, + SearchReplaceState, +) +from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class AcpSearchReplaceState(SearchReplaceState, AcpToolState): + file_backup_content: str | None = None + + +class SearchReplace(CoreSearchReplaceTool, BaseAcpTool[AcpSearchReplaceState]): + state: AcpSearchReplaceState + prompt_path = ( + VIBE_ROOT / "core" / "tools" / "builtins" / "prompts" / "search_replace.md" + ) + + @classmethod + def _get_tool_state_class(cls) -> type[AcpSearchReplaceState]: + return AcpSearchReplaceState + + async def _read_file(self, file_path: Path) -> str: + 
connection, session_id, _ = self._load_state() + + read_request = ReadTextFileRequest(sessionId=session_id, path=str(file_path)) + + await self._send_in_progress_session_update() + + try: + response = await connection.readTextFile(read_request) + except Exception as e: + raise ToolError(f"Unexpected error reading {file_path}: {e}") from e + + self.state.file_backup_content = response.content + return response.content + + async def _backup_file(self, file_path: Path) -> None: + if self.state.file_backup_content is None: + return + + await self._write_file( + file_path.with_suffix(file_path.suffix + ".bak"), + self.state.file_backup_content, + ) + + async def _write_file(self, file_path: Path, content: str) -> None: + connection, session_id, _ = self._load_state() + + write_request = WriteTextFileRequest( + sessionId=session_id, path=str(file_path), content=content + ) + + try: + await connection.writeTextFile(write_request) + except Exception as e: + raise ToolError(f"Error writing {file_path}: {e}") from e + + @classmethod + def tool_call_session_update(cls, event: ToolCallEvent) -> SessionUpdate | None: + args = event.args + if not isinstance(args, SearchReplaceArgs): + return None + + blocks = cls._parse_search_replace_blocks(args.content) + + return ToolCallStart( + sessionUpdate="tool_call", + title=cls.get_call_display(event).summary, + toolCallId=event.tool_call_id, + kind="edit", + content=[ + FileEditToolCallContent( + type="diff", + path=args.file_path, + oldText=block.search, + newText=block.replace, + ) + for block in blocks + ], + locations=[ToolCallLocation(path=args.file_path)], + rawInput=args.model_dump_json(), + ) + + @classmethod + def tool_result_session_update(cls, event: ToolResultEvent) -> SessionUpdate | None: + if event.error: + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status="failed", + ) + + result = event.result + if not isinstance(result, SearchReplaceResult): + return None + + 
blocks = cls._parse_search_replace_blocks(result.content) + + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status="completed", + content=[ + FileEditToolCallContent( + type="diff", + path=result.file, + oldText=block.search, + newText=block.replace, + ) + for block in blocks + ], + locations=[ToolCallLocation(path=result.file)], + rawOutput=result.model_dump_json(), + ) diff --git a/vibe/acp/tools/builtins/todo.py b/vibe/acp/tools/builtins/todo.py new file mode 100644 index 0000000..a19fb6b --- /dev/null +++ b/vibe/acp/tools/builtins/todo.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from typing import cast + +from acp.helpers import SessionUpdate +from acp.schema import AgentPlanUpdate, PlanEntry, PlanEntryPriority, PlanEntryStatus + +from vibe import VIBE_ROOT +from vibe.acp.tools.base import AcpToolState, BaseAcpTool +from vibe.core.tools.builtins.todo import ( + Todo as CoreTodoTool, + TodoArgs, + TodoPriority, + TodoResult, + TodoState, + TodoStatus, +) +from vibe.core.types import ToolCallEvent, ToolResultEvent + +TodoArgs = TodoArgs + + +class AcpTodoState(TodoState, AcpToolState): + pass + + +class Todo(CoreTodoTool, BaseAcpTool[AcpTodoState]): + state: AcpTodoState + prompt_path = VIBE_ROOT / "core" / "tools" / "builtins" / "prompts" / "todo.md" + + @classmethod + def _get_tool_state_class(cls) -> type[AcpTodoState]: + return AcpTodoState + + @classmethod + def tool_call_session_update(cls, event: ToolCallEvent) -> SessionUpdate | None: + return None + + @classmethod + def tool_result_session_update(cls, event: ToolResultEvent) -> SessionUpdate | None: + result = cast(TodoResult, event.result) + todos = [todo for todo in result.todos if todo.status != TodoStatus.CANCELLED] + matched_status: dict[TodoStatus, PlanEntryStatus] = { + TodoStatus.PENDING: "pending", + TodoStatus.IN_PROGRESS: "in_progress", + TodoStatus.COMPLETED: "completed", + } + matched_priority: dict[TodoPriority, 
PlanEntryPriority] = { + TodoPriority.LOW: "low", + TodoPriority.MEDIUM: "medium", + TodoPriority.HIGH: "high", + } + + update = AgentPlanUpdate( + sessionUpdate="plan", + entries=[ + PlanEntry( + content=todo.content, + status=matched_status[todo.status], + priority=matched_priority[todo.priority], + ) + for todo in todos + ], + ) + return update diff --git a/vibe/acp/tools/builtins/write_file.py b/vibe/acp/tools/builtins/write_file.py new file mode 100644 index 0000000..9612db8 --- /dev/null +++ b/vibe/acp/tools/builtins/write_file.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +from pathlib import Path + +from acp import WriteTextFileRequest +from acp.helpers import SessionUpdate +from acp.schema import ( + FileEditToolCallContent, + ToolCallLocation, + ToolCallProgress, + ToolCallStart, +) + +from vibe import VIBE_ROOT +from vibe.acp.tools.base import AcpToolState, BaseAcpTool +from vibe.core.tools.base import ToolError +from vibe.core.tools.builtins.write_file import ( + WriteFile as CoreWriteFileTool, + WriteFileArgs, + WriteFileResult, + WriteFileState, +) +from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class AcpWriteFileState(WriteFileState, AcpToolState): + pass + + +class WriteFile(CoreWriteFileTool, BaseAcpTool[AcpWriteFileState]): + state: AcpWriteFileState + prompt_path = ( + VIBE_ROOT / "core" / "tools" / "builtins" / "prompts" / "write_file.md" + ) + + @classmethod + def _get_tool_state_class(cls) -> type[AcpWriteFileState]: + return AcpWriteFileState + + async def _write_file(self, args: WriteFileArgs, file_path: Path) -> None: + connection, session_id, _ = self._load_state() + + write_request = WriteTextFileRequest( + sessionId=session_id, path=str(file_path), content=args.content + ) + + await self._send_in_progress_session_update() + + try: + await connection.writeTextFile(write_request) + except Exception as e: + raise ToolError(f"Error writing {file_path}: {e}") from e + + @classmethod + def 
tool_call_session_update(cls, event: ToolCallEvent) -> SessionUpdate | None: + args = event.args + if not isinstance(args, WriteFileArgs): + return None + + return ToolCallStart( + sessionUpdate="tool_call", + title=cls.get_call_display(event).summary, + toolCallId=event.tool_call_id, + kind="edit", + content=[ + FileEditToolCallContent( + type="diff", path=args.path, oldText=None, newText=args.content + ) + ], + locations=[ToolCallLocation(path=args.path)], + rawInput=args.model_dump_json(), + ) + + @classmethod + def tool_result_session_update(cls, event: ToolResultEvent) -> SessionUpdate | None: + if event.error: + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status="failed", + ) + + result = event.result + if not isinstance(result, WriteFileResult): + return None + + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status="completed", + content=[ + FileEditToolCallContent( + type="diff", path=result.path, oldText=None, newText=result.content + ) + ], + locations=[ToolCallLocation(path=result.path)], + rawOutput=result.model_dump_json(), + ) diff --git a/vibe/acp/tools/session_update.py b/vibe/acp/tools/session_update.py new file mode 100644 index 0000000..9b2321f --- /dev/null +++ b/vibe/acp/tools/session_update.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +from acp.helpers import SessionUpdate, ToolCallContentVariant +from acp.schema import ( + ContentToolCallContent, + TextContentBlock, + ToolCallProgress, + ToolCallStart, + ToolKind, +) + +from vibe.acp.tools.base import ( + ToolCallSessionUpdateProtocol, + ToolResultSessionUpdateProtocol, +) +from vibe.core.tools.ui import ToolUIDataAdapter +from vibe.core.types import ToolCallEvent, ToolResultEvent +from vibe.core.utils import TaggedText, is_user_cancellation_event + +TOOL_KIND: dict[str, ToolKind] = {"read_file": "read", "grep": "search"} + + +def tool_call_session_update(event: ToolCallEvent) 
-> SessionUpdate | None: + if issubclass(event.tool_class, ToolCallSessionUpdateProtocol): + return event.tool_class.tool_call_session_update(event) + + adapter = ToolUIDataAdapter(event.tool_class) + display = adapter.get_call_display(event) + content: list[ToolCallContentVariant] | None = ( + [ + ContentToolCallContent( + type="content", + content=TextContentBlock(type="text", text=display.content), + ) + ] + if display.content + else None + ) + + return ToolCallStart( + sessionUpdate="tool_call", + title=display.summary, + content=content, + toolCallId=event.tool_call_id, + kind=TOOL_KIND.get(event.tool_name, "other"), + rawInput=event.args.model_dump_json(), + ) + + +def tool_result_session_update(event: ToolResultEvent) -> SessionUpdate | None: + if is_user_cancellation_event(event): + tool_status = "failed" + raw_output = ( + TaggedText.from_string(event.skip_reason).message + if event.skip_reason + else None + ) + elif event.result: + tool_status = "completed" + raw_output = event.result.model_dump_json() + else: + tool_status = "failed" + raw_output = ( + TaggedText.from_string(event.error).message if event.error else None + ) + + if event.tool_class is None: + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status="failed", + rawOutput=raw_output, + content=[ + ContentToolCallContent( + type="content", + content=TextContentBlock(type="text", text=raw_output or ""), + ) + ], + ) + + if issubclass(event.tool_class, ToolResultSessionUpdateProtocol): + return event.tool_class.tool_result_session_update(event) + + if tool_status == "failed": + content = [ + ContentToolCallContent( + type="content", + content=TextContentBlock(type="text", text=raw_output or ""), + ) + ] + else: + adapter = ToolUIDataAdapter(event.tool_class) + display = adapter.get_result_display(event) + content: list[ToolCallContentVariant] | None = ( + [ + ContentToolCallContent( + type="content", + content=TextContentBlock(type="text", 
text=display.message), + ) + ] + if display.message + else None + ) + + return ToolCallProgress( + sessionUpdate="tool_call_update", + toolCallId=event.tool_call_id, + status=tool_status, + rawOutput=raw_output, + content=content, + ) diff --git a/vibe/acp/utils.py b/vibe/acp/utils.py new file mode 100644 index 0000000..1695a49 --- /dev/null +++ b/vibe/acp/utils.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import enum +from enum import StrEnum +from typing import Literal, cast + +from acp.schema import PermissionOption, SessionMode + + +class VibeSessionMode(enum.StrEnum): + APPROVAL_REQUIRED = enum.auto() + AUTO_APPROVE = enum.auto() + + def to_acp_session_mode(self) -> SessionMode: + match self: + case self.APPROVAL_REQUIRED: + return SessionMode( + id=VibeSessionMode.APPROVAL_REQUIRED, + name="Approval Required", + description="Requires user approval for tool executions", + ) + case self.AUTO_APPROVE: + return SessionMode( + id=VibeSessionMode.AUTO_APPROVE, + name="Auto Approve", + description="Automatically approves all tool executions", + ) + + @classmethod + def from_acp_session_mode(cls, session_mode: SessionMode) -> VibeSessionMode | None: + if not cls.is_valid(session_mode.id): + return None + return cls(session_mode.id) + + @classmethod + def is_valid(cls, mode_id: str) -> bool: + try: + return cls(mode_id).to_acp_session_mode() is not None + except (ValueError, KeyError): + return False + + @classmethod + def get_all_acp_session_modes(cls) -> list[SessionMode]: + return [mode.to_acp_session_mode() for mode in cls] + + +class ToolOption(StrEnum): + ALLOW_ONCE = "allow_once" + ALLOW_ALWAYS = "allow_always" + REJECT_ONCE = "reject_once" + REJECT_ALWAYS = "reject_always" + + +TOOL_OPTIONS = [ + PermissionOption( + optionId=ToolOption.ALLOW_ONCE, + name="Allow once", + kind=cast(Literal["allow_once"], ToolOption.ALLOW_ONCE), + ), + PermissionOption( + optionId=ToolOption.ALLOW_ALWAYS, + name="Allow always", + kind=cast(Literal["allow_always"], 
ToolOption.ALLOW_ALWAYS), + ), + PermissionOption( + optionId=ToolOption.REJECT_ONCE, + name="Reject once", + kind=cast(Literal["reject_once"], ToolOption.REJECT_ONCE), + ), +] diff --git a/vibe/cli/__init__.py b/vibe/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/cli/autocompletion/__init__.py b/vibe/cli/autocompletion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/cli/autocompletion/base.py b/vibe/cli/autocompletion/base.py new file mode 100644 index 0000000..edc562a --- /dev/null +++ b/vibe/cli/autocompletion/base.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from enum import StrEnum +from typing import Protocol + + +class CompletionResult(StrEnum): + IGNORED = "ignored" + HANDLED = "handled" + SUBMIT = "submit" + + +class CompletionView(Protocol): + def render_completion_suggestions( + self, suggestions: list[tuple[str, str]], selected_index: int + ) -> None: ... + + def clear_completion_suggestions(self) -> None: ... + + def replace_completion_range( + self, start: int, end: int, replacement: str + ) -> None: ... 
diff --git a/vibe/cli/autocompletion/path_completion.py b/vibe/cli/autocompletion/path_completion.py new file mode 100644 index 0000000..cd236d0 --- /dev/null +++ b/vibe/cli/autocompletion/path_completion.py @@ -0,0 +1,173 @@ +from __future__ import annotations + +from concurrent.futures import Future, ThreadPoolExecutor +from threading import Lock + +from textual import events + +from vibe.cli.autocompletion.base import CompletionResult, CompletionView +from vibe.core.autocompletion.completers import PathCompleter + +MAX_SUGGESTIONS_COUNT = 10 + + +class PathCompletionController: + _executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="path-completion") + + def __init__(self, completer: PathCompleter, view: CompletionView) -> None: + self._completer = completer + self._view = view + self._suggestions: list[tuple[str, str]] = [] + self._selected_index = 0 + self._pending_future: Future | None = None + self._last_query: tuple[str, int] | None = None + self._query_lock = Lock() + + def can_handle(self, text: str, cursor_index: int) -> bool: + if cursor_index < 0 or cursor_index > len(text): + return False + + if cursor_index == 0: + return False + + before_cursor = text[:cursor_index] + if "@" not in before_cursor: + return False + + at_index = before_cursor.rfind("@") + + if cursor_index <= at_index: + return False + + fragment = before_cursor[at_index:cursor_index] + # fragment must not be empty (including @) and not contain any spaces + return bool(fragment) and " " not in fragment + + def reset(self) -> None: + with self._query_lock: + if self._pending_future and not self._pending_future.done(): + self._pending_future.cancel() + self._pending_future = None + self._last_query = None + if self._suggestions: + self._suggestions.clear() + self._selected_index = 0 + self._view.clear_completion_suggestions() + + def on_text_changed(self, text: str, cursor_index: int) -> None: + if not self.can_handle(text, cursor_index): + self.reset() + return + + query = 
(text, cursor_index) + with self._query_lock: + if query == self._last_query: + return + + if self._pending_future and not self._pending_future.done(): + # NOTE (Vince): this is a "best effort" cancellation: it only works if the task + # hasn't started; once running in the thread pool, it cannot be cancelled + self._pending_future.cancel() + + self._last_query = query + + app = getattr(self._view, "app", None) + if app: + with self._query_lock: + self._pending_future = self._executor.submit( + self._compute_completions, text, cursor_index + ) + self._pending_future.add_done_callback( + lambda f: self._handle_completion_result(f, query) + ) + else: + suggestions = self._compute_completions(text, cursor_index) + self._update_suggestions(suggestions) + + def _compute_completions( + self, text: str, cursor_index: int + ) -> list[tuple[str, str]]: + return self._completer.get_completion_items(text, cursor_index) + + def _handle_completion_result(self, future: Future, query: tuple[str, int]) -> None: + if future.cancelled(): + return + + try: + suggestions = future.result() + with self._query_lock: + if query == self._last_query: + self._update_suggestions(suggestions) + except Exception: + with self._query_lock: + self._pending_future = None + self._last_query = None + + def _update_suggestions(self, suggestions: list[tuple[str, str]]) -> None: + if len(suggestions) > MAX_SUGGESTIONS_COUNT: + suggestions = suggestions[:MAX_SUGGESTIONS_COUNT] + + app = getattr(self._view, "app", None) + + if suggestions: + self._suggestions = suggestions + self._selected_index = 0 + if app: + app.call_after_refresh( + self._view.render_completion_suggestions, + self._suggestions, + self._selected_index, + ) + else: + self._view.render_completion_suggestions( + self._suggestions, self._selected_index + ) + elif app: + app.call_after_refresh(self.reset) + else: + self.reset() + + def on_key( + self, event: events.Key, text: str, cursor_index: int + ) -> CompletionResult: + if not 
self._suggestions: + return CompletionResult.IGNORED + + match event.key: + case "tab" | "enter": + if self._apply_selected_completion(text, cursor_index): + return CompletionResult.HANDLED + return CompletionResult.IGNORED + case "down": + self._move_selection(1) + return CompletionResult.HANDLED + case "up": + self._move_selection(-1) + return CompletionResult.HANDLED + case _: + return CompletionResult.IGNORED + + def _move_selection(self, delta: int) -> None: + if not self._suggestions: + return + + count = len(self._suggestions) + self._selected_index = (self._selected_index + delta) % count + self._view.render_completion_suggestions( + self._suggestions, self._selected_index + ) + + def _apply_selected_completion(self, text: str, cursor_index: int) -> bool: + if not self._suggestions: + return False + + completion, _ = self._suggestions[self._selected_index] + replacement_range = self._completer.get_replacement_range(text, cursor_index) + if replacement_range is None: + self.reset() + return False + + start, end = replacement_range + self._view.replace_completion_range(start, end, completion) + self.reset() + return True diff --git a/vibe/cli/autocompletion/slash_command.py b/vibe/cli/autocompletion/slash_command.py new file mode 100644 index 0000000..830ef34 --- /dev/null +++ b/vibe/cli/autocompletion/slash_command.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from textual import events + +from vibe.cli.autocompletion.base import CompletionResult, CompletionView +from vibe.core.autocompletion.completers import CommandCompleter + +MAX_SUGGESTIONS_COUNT = 5 + + +class SlashCommandController: + def __init__(self, completer: CommandCompleter, view: CompletionView) -> None: + self._completer = completer + self._view = view + self._suggestions: list[tuple[str, str]] = [] + self._selected_index = 0 + + def can_handle(self, text: str, cursor_index: int) -> bool: + return text.startswith("/") + + def reset(self) -> None: + if self._suggestions: + 
self._suggestions.clear() + self._selected_index = 0 + self._view.clear_completion_suggestions() + + def on_text_changed(self, text: str, cursor_index: int) -> None: + if cursor_index < 0 or cursor_index > len(text): + self.reset() + return + + if not self.can_handle(text, cursor_index): + self.reset() + return + + suggestions = self._completer.get_completion_items(text, cursor_index) + if len(suggestions) > MAX_SUGGESTIONS_COUNT: + suggestions = suggestions[:MAX_SUGGESTIONS_COUNT] + if suggestions: + self._suggestions = suggestions + self._selected_index = 0 + self._view.render_completion_suggestions( + self._suggestions, self._selected_index + ) + else: + self.reset() + + def on_key( + self, event: events.Key, text: str, cursor_index: int + ) -> CompletionResult: + if not self._suggestions: + return CompletionResult.IGNORED + + match event.key: + case "tab": + if self._apply_selected_completion(text, cursor_index): + result = CompletionResult.HANDLED + else: + result = CompletionResult.IGNORED + case "enter": + if self._apply_selected_completion(text, cursor_index): + result = CompletionResult.SUBMIT + else: + result = CompletionResult.HANDLED + case "down": + self._move_selection(1) + result = CompletionResult.HANDLED + case "up": + self._move_selection(-1) + result = CompletionResult.HANDLED + case _: + result = CompletionResult.IGNORED + + return result + + def _move_selection(self, delta: int) -> None: + if not self._suggestions: + return + + count = len(self._suggestions) + self._selected_index = (self._selected_index + delta) % count + self._view.render_completion_suggestions( + self._suggestions, self._selected_index + ) + + def _apply_selected_completion(self, text: str, cursor_index: int) -> bool: + if not self._suggestions: + return False + + alias, _ = self._suggestions[self._selected_index] + replacement_range = self._completer.get_replacement_range(text, cursor_index) + if replacement_range is None: + self.reset() + return False + + start, end = 
def copy_selection_to_clipboard(app: App) -> None:
    """Collect every widget's text selection and copy them to the system clipboard.

    Widgets without a selection are skipped and whitespace-only selections are
    dropped. On clipboard failure, the user is pointed at the in-app shortcut
    via a warning notification.
    """
    fragments: list[str] = []
    for widget in app.query("*"):
        selection = getattr(widget, "text_selection", None)
        if not selection:
            continue
        extracted = widget.get_selection(selection)
        if not extracted:
            continue
        text, _ = extracted
        if text.strip():
            fragments.append(text)

    if not fragments:
        return

    try:
        pyperclip.copy("\n".join(fragments))
        app.notify("Selection added to clipboard", severity="information", timeout=2)
    except Exception:
        app.notify(
            "Use Ctrl+c to copy selections in Vibe", severity="warning", timeout=3
        )
), + "reload": Command( + aliases=frozenset(["/reload", "/r"]), + description="Reload configuration from disk", + handler="_reload_config", + ), + "clear": Command( + aliases=frozenset(["/clear", "/reset"]), + description="Clear conversation history", + handler="_clear_history", + ), + "log": Command( + aliases=frozenset(["/log", "/logpath"]), + description="Show path to current interaction log file", + handler="_show_log_path", + ), + "compact": Command( + aliases=frozenset(["/compact", "/summarize"]), + description="Compact conversation history by summarizing", + handler="_compact_history", + ), + "exit": Command( + aliases=frozenset(["/exit", "/quit", "/q"]), + description="Exit the application", + handler="_exit_app", + exits=True, + ), + } + + for command in excluded_commands: + self.commands.pop(command, None) + + self._alias_map = {} + for cmd_name, cmd in self.commands.items(): + for alias in cmd.aliases: + self._alias_map[alias] = cmd_name + + def find_command(self, user_input: str) -> Command | None: + cmd_name = self._alias_map.get(user_input.lower().strip()) + return self.commands.get(cmd_name) if cmd_name else None + + def get_help_text(self) -> str: + lines: list[str] = [ + "### Keyboard Shortcuts", + "", + "- `Enter` Submit message", + "- `Ctrl+J` / `Shift+Enter` Insert newline", + "- `Escape` Interrupt agent or close dialogs", + "- `Ctrl+C` Quit (or clear input if text present)", + "- `Ctrl+O` Toggle tool output view", + "- `Ctrl+T` Toggle todo view", + "- `Shift+Tab` Toggle auto-approve mode", + "", + "### Special Features", + "", + "- `!` Execute bash command directly", + "- `@path/to/file/` Autocompletes file paths", + "", + "### Commands", + "", + ] + + for cmd in self.commands.values(): + aliases = ", ".join(f"`{alias}`" for alias in sorted(cmd.aliases)) + lines.append(f"- {aliases}: {cmd.description}") + return "\n".join(lines) diff --git a/vibe/cli/entrypoint.py b/vibe/cli/entrypoint.py new file mode 100644 index 0000000..8eb93f4 --- 
def parse_arguments() -> argparse.Namespace:
    """Build the CLI argument parser for Mistral Vibe and parse ``sys.argv``.

    Returns the parsed namespace. ``--continue`` and ``--resume`` are mutually
    exclusive; ``-p`` accepts an optional value (empty string when given bare,
    which lets a piped stdin prompt take over).
    """
    arg_parser = argparse.ArgumentParser(
        description="Run the Mistral Vibe interactive CLI"
    )
    arg_parser.add_argument(
        "initial_prompt",
        nargs="?",
        metavar="PROMPT",
        help="Initial prompt to start the interactive session with.",
    )
    arg_parser.add_argument(
        "-p",
        "--prompt",
        nargs="?",
        const="",  # bare "-p" yields "" so stdin can supply the prompt
        metavar="TEXT",
        help="Run in programmatic mode: send prompt, auto-approve all tools, output response, and exit.",
    )
    arg_parser.add_argument(
        "--auto-approve",
        action="store_true",
        default=False,
        help="Automatically approve all tool executions.",
    )
    arg_parser.add_argument(
        "--max-turns",
        type=int,
        metavar="N",
        help="Maximum number of assistant turns (only applies in programmatic mode with -p).",
    )
    arg_parser.add_argument(
        "--max-price",
        type=float,
        metavar="DOLLARS",
        help="Maximum cost in dollars (only applies in programmatic mode with -p). Session will be interrupted if cost exceeds this limit.",
    )
    arg_parser.add_argument(
        "--enabled-tools",
        action="append",  # repeatable flag, accumulated into a list
        metavar="TOOL",
        help="Enable specific tools. In programmatic mode (-p), this disables all other tools. Can use exact names, glob patterns (e.g., 'bash*'), or regex with 're:' prefix. Can be specified multiple times.",
    )
    arg_parser.add_argument(
        "--output",
        type=str,
        choices=["text", "json", "streaming"],
        default="text",
        help="Output format for programmatic mode (-p): 'text' for human-readable (default), 'json' for all messages at end, 'streaming' for newline-delimited JSON per message.",
    )
    arg_parser.add_argument(
        "--agent",
        metavar="NAME",
        default=None,
        help="Load agent configuration from ~/.vibe/agents/NAME.toml",
    )
    arg_parser.add_argument("--setup", action="store_true", help="Setup API key and exit")

    # A session can be continued OR resumed by id, never both.
    resume_group = arg_parser.add_mutually_exclusive_group()
    resume_group.add_argument(
        "-c",
        "--continue",
        action="store_true",
        dest="continue_session",
        help="Continue from the most recent saved session",
    )
    resume_group.add_argument(
        "--resume",
        metavar="SESSION_ID",
        help="Resume a specific session by its ID (supports partial matching)",
    )
    return arg_parser.parse_args()
Can be specified multiple times.", + ) + parser.add_argument( + "--output", + type=str, + choices=["text", "json", "streaming"], + default="text", + help="Output format for programmatic mode (-p): 'text' " + "for human-readable (default), 'json' for all messages at end, " + "'streaming' for newline-delimited JSON per message.", + ) + parser.add_argument( + "--agent", + metavar="NAME", + default=None, + help="Load agent configuration from ~/.vibe/agents/NAME.toml", + ) + parser.add_argument("--setup", action="store_true", help="Setup API key and exit") + + continuation_group = parser.add_mutually_exclusive_group() + continuation_group.add_argument( + "-c", + "--continue", + action="store_true", + dest="continue_session", + help="Continue from the most recent saved session", + ) + continuation_group.add_argument( + "--resume", + metavar="SESSION_ID", + help="Resume a specific session by its ID (supports partial matching)", + ) + return parser.parse_args() + + +def get_prompt_from_stdin() -> str | None: + if sys.stdin.isatty(): + return None + try: + if content := sys.stdin.read().strip(): + sys.stdin = sys.__stdin__ = open("/dev/tty") + return content + except KeyboardInterrupt: + pass + except OSError: + return None + + return None + + +def load_config_or_exit(agent: str | None = None) -> VibeConfig: + try: + return VibeConfig.load(agent) + except MissingAPIKeyError: + run_onboarding() + return VibeConfig.load(agent) + except MissingPromptFileError as e: + rprint(f"[yellow]Invalid system prompt id: {e}[/]") + sys.exit(1) + except ValueError as e: + rprint(f"[yellow]{e}[/]") + sys.exit(1) + + +def main() -> None: # noqa: PLR0912, PLR0915 + load_api_keys_from_env() + args = parse_arguments() + + if args.setup: + run_onboarding() + sys.exit(0) + try: + if not CONFIG_FILE.exists(): + try: + VibeConfig.save_updates(VibeConfig.create_default()) + except Exception as e: + rprint(f"[yellow]Could not create default config file: {e}[/]") + + if not INSTRUCTIONS_FILE.exists(): 
+ try: + INSTRUCTIONS_FILE.parent.mkdir(parents=True, exist_ok=True) + INSTRUCTIONS_FILE.touch() + except Exception as e: + rprint(f"[yellow]Could not create instructions file: {e}[/]") + + if not HISTORY_FILE.exists(): + try: + HISTORY_FILE.parent.mkdir(parents=True, exist_ok=True) + HISTORY_FILE.write_text("Hello Vibe!\n", "utf-8") + except Exception as e: + rprint(f"[yellow]Could not create history file: {e}[/]") + + config = load_config_or_exit(args.agent) + + if args.enabled_tools: + config.enabled_tools = args.enabled_tools + + loaded_messages = None + session_info = None + + if args.continue_session or args.resume: + if not config.session_logging.enabled: + rprint( + "[red]Session logging is disabled. " + "Enable it in config to use --continue or --resume[/]" + ) + sys.exit(1) + + session_to_load = None + if args.continue_session: + session_to_load = InteractionLogger.find_latest_session( + config.session_logging + ) + if not session_to_load: + rprint( + f"[red]No previous sessions found in " + f"{config.session_logging.save_dir}[/]" + ) + sys.exit(1) + else: + session_to_load = InteractionLogger.find_session_by_id( + args.resume, config.session_logging + ) + if not session_to_load: + rprint( + f"[red]Session '{args.resume}' not found in " + f"{config.session_logging.save_dir}[/]" + ) + sys.exit(1) + + try: + loaded_messages, metadata = InteractionLogger.load_session( + session_to_load + ) + session_id = metadata.get("session_id", "unknown")[:8] + session_time = metadata.get("start_time", "unknown time") + + session_info = ResumeSessionInfo( + type="continue" if args.continue_session else "resume", + session_id=session_id, + session_time=session_time, + ) + except Exception as e: + rprint(f"[red]Failed to load session: {e}[/]") + sys.exit(1) + + stdin_prompt = get_prompt_from_stdin() + if args.prompt is not None: + programmatic_prompt = args.prompt or stdin_prompt + if not programmatic_prompt: + print( + "Error: No prompt provided for programmatic mode", 
class HistoryManager:
    """Persist and navigate the user's input history.

    Entries are stored one JSON-encoded string per line in ``history_file``,
    capped at ``max_entries``. Navigation works shell-style: stepping back
    remembers the in-progress input and stepping past the newest entry
    restores it.
    """

    def __init__(self, history_file: Path, max_entries: int = 100) -> None:
        self.history_file = history_file
        self.max_entries = max_entries
        self._entries: list[str] = []
        # -1 means "not navigating"; otherwise the index of the shown entry.
        self._current_index: int = -1
        # The unsent input captured when navigation starts.
        self._temp_input: str = ""
        self._load_history()

    def _load_history(self) -> None:
        """Load persisted entries, tolerating a missing file and malformed lines."""
        if not self.history_file.exists():
            return

        try:
            loaded: list[str] = []
            with self.history_file.open("r", encoding="utf-8") as fh:
                for line in fh:
                    stripped = line.rstrip("\n\r")
                    if not stripped:
                        continue
                    try:
                        parsed = json.loads(stripped)
                    except json.JSONDecodeError:
                        # Legacy/plain-text line: keep it verbatim.
                        parsed = stripped
                    loaded.append(parsed if isinstance(parsed, str) else str(parsed))
            self._entries = loaded[-self.max_entries :]
        except (OSError, UnicodeDecodeError):
            self._entries = []

    def _save_history(self) -> None:
        """Best-effort rewrite of the history file; losses on I/O errors are accepted."""
        try:
            self.history_file.parent.mkdir(parents=True, exist_ok=True)
            lines = [json.dumps(entry) + "\n" for entry in self._entries]
            with self.history_file.open("w", encoding="utf-8") as fh:
                fh.writelines(lines)
        except OSError:
            pass

    def add(self, text: str) -> None:
        """Record a submitted input, skipping blanks, slash-commands and repeats."""
        text = text.strip()
        if not text or text.startswith("/"):
            return
        if self._entries and self._entries[-1] == text:
            return

        self._entries.append(text)
        # Trim from the front once the cap is exceeded (no-op otherwise).
        del self._entries[: -self.max_entries]

        self._save_history()
        self.reset_navigation()

    def get_previous(self, current_input: str, prefix: str = "") -> str | None:
        """Step back to the nearest older entry matching ``prefix``, or None."""
        if not self._entries:
            return None

        if self._current_index == -1:
            # Entering navigation: stash the in-progress input first.
            self._temp_input = current_input
            self._current_index = len(self._entries)

        match_index = next(
            (
                i
                for i in range(self._current_index - 1, -1, -1)
                if self._entries[i].startswith(prefix)
            ),
            None,
        )
        if match_index is None:
            return None

        self._current_index = match_index
        return self._entries[match_index]

    def get_next(self, prefix: str = "") -> str | None:
        """Step forward; past the newest entry, restore the stashed input."""
        if self._current_index == -1:
            return None

        match_index = next(
            (
                i
                for i in range(self._current_index + 1, len(self._entries))
                if self._entries[i].startswith(prefix)
            ),
            None,
        )
        if match_index is not None:
            self._current_index = match_index
            return self._entries[match_index]

        saved = self._temp_input
        self.reset_navigation()
        return saved

    def reset_navigation(self) -> None:
        """Leave history-navigation mode and drop the stashed input."""
        self._current_index = -1
        self._temp_input = ""
VerticalScroll +from textual.events import MouseUp +from textual.widget import Widget +from textual.widgets import Static + +from vibe.cli.clipboard import copy_selection_to_clipboard +from vibe.cli.commands import CommandRegistry +from vibe.cli.textual_ui.handlers.event_handler import EventHandler +from vibe.cli.textual_ui.widgets.approval_app import ApprovalApp +from vibe.cli.textual_ui.widgets.chat_input import ChatInputContainer +from vibe.cli.textual_ui.widgets.compact import CompactMessage +from vibe.cli.textual_ui.widgets.config_app import ConfigApp +from vibe.cli.textual_ui.widgets.context_progress import ContextProgress, TokenState +from vibe.cli.textual_ui.widgets.loading import LoadingWidget +from vibe.cli.textual_ui.widgets.messages import ( + AssistantMessage, + BashOutputMessage, + ErrorMessage, + InterruptMessage, + UserCommandMessage, + UserMessage, +) +from vibe.cli.textual_ui.widgets.mode_indicator import ModeIndicator +from vibe.cli.textual_ui.widgets.path_display import PathDisplay +from vibe.cli.textual_ui.widgets.tools import ToolCallMessage, ToolResultMessage +from vibe.cli.textual_ui.widgets.welcome import WelcomeBanner +from vibe.cli.update_notifier import ( + GitHubVersionUpdateGateway, + VersionUpdate, + VersionUpdateError, + is_version_update_available, +) +from vibe.cli.update_notifier.version_update_gateway import VersionUpdateGateway +from vibe.core import __version__ as CORE_VERSION +from vibe.core.agent import Agent +from vibe.core.autocompletion.path_prompt_adapter import render_path_prompt +from vibe.core.config import HISTORY_FILE, VibeConfig +from vibe.core.tools.base import BaseToolConfig, ToolPermission +from vibe.core.types import LLMMessage, ResumeSessionInfo, Role +from vibe.core.utils import ( + ApprovalResponse, + CancellationReason, + get_user_cancellation_message, + is_dangerous_directory, + logger, +) + + +class BottomApp(StrEnum): + Approval = auto() + Config = auto() + Input = auto() + + +class VibeApp(App): + 
ENABLE_COMMAND_PALETTE = False + CSS_PATH = "app.tcss" + + BINDINGS: ClassVar[list[BindingType]] = [ + Binding("ctrl+c", "force_quit", "Quit", show=False), + Binding("escape", "interrupt", "Interrupt", show=False, priority=True), + Binding("ctrl+o", "toggle_tool", "Toggle Tool", show=False), + Binding("ctrl+t", "toggle_todo", "Toggle Todo", show=False), + Binding("shift+tab", "cycle_mode", "Cycle Mode", show=False, priority=True), + ] + + def __init__( + self, + config: VibeConfig, + auto_approve: bool = False, + enable_streaming: bool = False, + initial_prompt: str | None = None, + loaded_messages: list[LLMMessage] | None = None, + session_info: ResumeSessionInfo | None = None, + version_update_notifier: VersionUpdateGateway | None = None, + current_version: str = CORE_VERSION, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + self.config = config + self.auto_approve = auto_approve + self.enable_streaming = enable_streaming + self.agent: Agent | None = None + self._agent_running = False + self._agent_initializing = False + self._interrupt_requested = False + self._agent_task: asyncio.Task | None = None + + self._loading_widget: LoadingWidget | None = None + self._pending_approval: asyncio.Future | None = None + + self.event_handler: EventHandler | None = None + self.commands = CommandRegistry() + + self._chat_input_container: ChatInputContainer | None = None + self._mode_indicator: ModeIndicator | None = None + self._context_progress: ContextProgress | None = None + self._current_bottom_app: BottomApp = BottomApp.Input + self.theme = config.textual_theme + + self.history_file = HISTORY_FILE + + self._tools_collapsed = True + self._todos_collapsed = False + self._current_streaming_message: AssistantMessage | None = None + self._version_update_notifier = version_update_notifier + self._is_update_check_enabled = config.enable_update_checks + self._current_version = current_version + self._update_notification_task: asyncio.Task | None = None + 
self._update_notification_shown = False + + self._initial_prompt = initial_prompt + self._loaded_messages = loaded_messages + self._session_info = session_info + self._agent_init_task: asyncio.Task | None = None + # prevent a race condition where the agent initialization + # completes exactly at the moment the user interrupts + self._agent_init_interrupted = False + + def compose(self) -> ComposeResult: + with VerticalScroll(id="chat"): + yield WelcomeBanner(self.config) + yield Static(id="messages") + + with Horizontal(id="loading-area"): + yield Static(id="loading-area-content") + yield ModeIndicator(auto_approve=self.auto_approve) + + yield Static(id="todo-area") + + with Static(id="bottom-app-container"): + yield ChatInputContainer( + history_file=self.history_file, + command_registry=self.commands, + id="input-container", + show_warning=self.auto_approve, + ) + + with Horizontal(id="bottom-bar"): + yield PathDisplay( + self.config.displayed_workdir or self.config.effective_workdir + ) + yield Static(id="spacer") + yield ContextProgress() + + async def on_mount(self) -> None: + self.event_handler = EventHandler( + mount_callback=self._mount_and_scroll, + scroll_callback=self._scroll_to_bottom_deferred, + todo_area_callback=lambda: self.query_one("#todo-area"), + get_tools_collapsed=lambda: self._tools_collapsed, + get_todos_collapsed=lambda: self._todos_collapsed, + ) + + self._chat_input_container = self.query_one(ChatInputContainer) + self._mode_indicator = self.query_one(ModeIndicator) + self._context_progress = self.query_one(ContextProgress) + + if self.config.auto_compact_threshold > 0: + self._context_progress.tokens = TokenState( + max_tokens=self.config.auto_compact_threshold, current_tokens=0 + ) + + chat_input_container = self.query_one(ChatInputContainer) + chat_input_container.focus_input() + await self._show_dangerous_directory_warning() + self._schedule_update_notification() + + if self._session_info: + await 
self._mount_and_scroll(AssistantMessage(self._session_info.message())) + + if self._initial_prompt: + self.call_after_refresh(self._process_initial_prompt) + else: + self._ensure_agent_init_task() + + def _process_initial_prompt(self) -> None: + if self._initial_prompt: + self.run_worker( + self._handle_user_message(self._initial_prompt), exclusive=False + ) + + async def on_chat_input_container_submitted( + self, event: ChatInputContainer.Submitted + ) -> None: + value = event.value.strip() + if not value: + return + + input_widget = self.query_one(ChatInputContainer) + input_widget.value = "" + + if self._agent_running: + await self._interrupt_agent() + + if value.startswith("!"): + await self._handle_bash_command(value[1:]) + return + + if await self._handle_command(value): + return + + await self._handle_user_message(value) + + async def on_approval_app_approval_granted( + self, message: ApprovalApp.ApprovalGranted + ) -> None: + if self._pending_approval and not self._pending_approval.done(): + self._pending_approval.set_result((ApprovalResponse.YES, None)) + + await self._switch_to_input_app() + + async def on_approval_app_approval_granted_always_tool( + self, message: ApprovalApp.ApprovalGrantedAlwaysTool + ) -> None: + self._set_tool_permission_always( + message.tool_name, save_permanently=message.save_permanently + ) + + if self._pending_approval and not self._pending_approval.done(): + self._pending_approval.set_result((ApprovalResponse.YES, None)) + + await self._switch_to_input_app() + + async def on_approval_app_approval_rejected( + self, message: ApprovalApp.ApprovalRejected + ) -> None: + if self._pending_approval and not self._pending_approval.done(): + feedback = str( + get_user_cancellation_message(CancellationReason.OPERATION_CANCELLED) + ) + self._pending_approval.set_result((ApprovalResponse.NO, feedback)) + + await self._switch_to_input_app() + + if self._loading_widget and self._loading_widget.parent: + await self._remove_loading_widget() + + 
async def _remove_loading_widget(self) -> None: + if self._loading_widget and self._loading_widget.parent: + await self._loading_widget.remove() + self._loading_widget = None + + def on_config_app_setting_changed(self, message: ConfigApp.SettingChanged) -> None: + if message.key == "textual_theme": + self.theme = message.value + + async def on_config_app_config_closed( + self, message: ConfigApp.ConfigClosed + ) -> None: + if message.changes: + self._save_config_changes(message.changes) + await self._reload_config() + else: + await self._mount_and_scroll( + UserCommandMessage("Configuration closed (no changes saved).") + ) + + await self._switch_to_input_app() + + def _set_tool_permission_always( + self, tool_name: str, save_permanently: bool = False + ) -> None: + if save_permanently: + VibeConfig.save_updates({"tools": {tool_name: {"permission": "always"}}}) + + if tool_name not in self.config.tools: + self.config.tools[tool_name] = BaseToolConfig() + + self.config.tools[tool_name].permission = ToolPermission.ALWAYS + + def _save_config_changes(self, changes: dict[str, str]) -> None: + if not changes: + return + + updates: dict = {} + + for key, value in changes.items(): + match key: + case "active_model": + if value != self.config.active_model: + updates["active_model"] = value + case "textual_theme": + if value != self.config.textual_theme: + updates["textual_theme"] = value + + if updates: + VibeConfig.save_updates(updates) + + async def _handle_command(self, user_input: str) -> bool: + if command := self.commands.find_command(user_input): + handler = getattr(self, command.handler) + if asyncio.iscoroutinefunction(handler): + await handler() + else: + handler() + return True + return False + + async def _handle_bash_command(self, command: str) -> None: + if not command: + await self._mount_and_scroll( + ErrorMessage( + "No command provided after '!'", collapsed=self._tools_collapsed + ) + ) + return + + try: + result = subprocess.run( + command, + shell=True, 
+ capture_output=True, + text=False, + timeout=30, + cwd=self.config.effective_workdir, + ) + stdout = ( + result.stdout.decode("utf-8", errors="replace") if result.stdout else "" + ) + stderr = ( + result.stderr.decode("utf-8", errors="replace") if result.stderr else "" + ) + output = stdout or stderr or "(no output)" + exit_code = result.returncode + await self._mount_and_scroll( + BashOutputMessage( + command, str(self.config.effective_workdir), output, exit_code + ) + ) + except subprocess.TimeoutExpired: + await self._mount_and_scroll( + ErrorMessage( + "Command timed out after 30 seconds", + collapsed=self._tools_collapsed, + ) + ) + except Exception as e: + await self._mount_and_scroll( + ErrorMessage(f"Command failed: {e}", collapsed=self._tools_collapsed) + ) + + async def _handle_user_message(self, message: str) -> None: + init_task = self._ensure_agent_init_task() + pending_init = bool(init_task and not init_task.done()) + user_message = UserMessage(message, pending=pending_init) + + await self._mount_and_scroll(user_message) + + self.run_worker( + self._process_user_message_after_mount( + message=message, + user_message=user_message, + init_task=init_task, + pending_init=pending_init, + ), + exclusive=False, + ) + + async def _process_user_message_after_mount( + self, + message: str, + user_message: UserMessage, + init_task: asyncio.Task | None, + pending_init: bool, + ) -> None: + try: + if init_task and not init_task.done(): + loading = LoadingWidget() + self._loading_widget = loading + await self.query_one("#loading-area-content").mount(loading) + + try: + await init_task + finally: + if self._loading_widget and self._loading_widget.parent: + await self._loading_widget.remove() + self._loading_widget = None + if pending_init: + await user_message.set_pending(False) + elif pending_init: + await user_message.set_pending(False) + + if pending_init and self._agent_init_interrupted: + self._agent_init_interrupted = False + return + + if self.agent and not 
self._agent_running: + self._agent_task = asyncio.create_task(self._handle_agent_turn(message)) + except asyncio.CancelledError: + self._agent_init_interrupted = False + if pending_init: + await user_message.set_pending(False) + return + + async def _initialize_agent(self) -> None: + if self.agent or self._agent_initializing: + return + + self._agent_initializing = True + try: + agent = Agent( + self.config, + auto_approve=self.auto_approve, + enable_streaming=self.enable_streaming, + ) + + if not self.auto_approve: + agent.approval_callback = self._approval_callback + + if self._loaded_messages: + non_system_messages = [ + msg + for msg in self._loaded_messages + if not (msg.role == Role.system) + ] + agent.messages.extend(non_system_messages) + logger.info( + "Loaded %d messages from previous session", len(non_system_messages) + ) + + self.agent = agent + except asyncio.CancelledError: + self.agent = None + return + except Exception as e: + self.agent = None + await self._mount_and_scroll( + ErrorMessage(str(e), collapsed=self._tools_collapsed) + ) + finally: + self._agent_initializing = False + self._agent_init_task = None + + def _ensure_agent_init_task(self) -> asyncio.Task | None: + if self.agent: + self._agent_init_task = None + self._agent_init_interrupted = False + return None + + if self._agent_init_task and self._agent_init_task.done(): + if self._agent_init_task.cancelled(): + self._agent_init_task = None + + if not self._agent_init_task or self._agent_init_task.done(): + self._agent_init_interrupted = False + self._agent_init_task = asyncio.create_task(self._initialize_agent()) + + return self._agent_init_task + + async def _approval_callback( + self, tool: str, args: dict, tool_call_id: str + ) -> tuple[str, str | None]: + self._pending_approval = asyncio.Future() + await self._switch_to_approval_app(tool, args) + result = await self._pending_approval + self._pending_approval = None + return result + + async def _handle_agent_turn(self, prompt: str) 
-> None: + if not self.agent: + return + + self._agent_running = True + + loading_area = self.query_one("#loading-area-content") + + loading = LoadingWidget() + self._loading_widget = loading + await loading_area.mount(loading) + + try: + rendered_prompt = render_path_prompt( + prompt, base_dir=self.config.effective_workdir + ) + async for event in self.agent.act(rendered_prompt): + if self._context_progress and self.agent: + current_state = self._context_progress.tokens + self._context_progress.tokens = TokenState( + max_tokens=current_state.max_tokens, + current_tokens=self.agent.stats.context_tokens, + ) + + if self.event_handler: + await self.event_handler.handle_event( + event, + loading_active=self._loading_widget is not None, + loading_widget=self._loading_widget, + ) + + except asyncio.CancelledError: + if self._loading_widget and self._loading_widget.parent: + await self._loading_widget.remove() + if self.event_handler: + self.event_handler.stop_current_tool_call() + raise + except Exception as e: + if self._loading_widget and self._loading_widget.parent: + await self._loading_widget.remove() + if self.event_handler: + self.event_handler.stop_current_tool_call() + await self._mount_and_scroll( + ErrorMessage(str(e), collapsed=self._tools_collapsed) + ) + finally: + self._agent_running = False + self._interrupt_requested = False + self._agent_task = None + if self._loading_widget: + await self._loading_widget.remove() + self._loading_widget = None + await self._finalize_current_streaming_message() + + async def _interrupt_agent(self) -> None: + interrupting_agent_init = bool( + self._agent_init_task and not self._agent_init_task.done() + ) + + if ( + not self._agent_running and not interrupting_agent_init + ) or self._interrupt_requested: + return + + self._interrupt_requested = True + + if interrupting_agent_init and self._agent_init_task: + self._agent_init_interrupted = True + self._agent_init_task.cancel() + try: + await self._agent_init_task + except 
asyncio.CancelledError: + pass + + if self._agent_task and not self._agent_task.done(): + self._agent_task.cancel() + try: + await self._agent_task + except asyncio.CancelledError: + pass + + if self.event_handler: + self.event_handler.stop_current_tool_call() + self.event_handler.stop_current_compact() + + self._agent_running = False + loading_area = self.query_one("#loading-area-content") + await loading_area.remove_children() + + await self._finalize_current_streaming_message() + await self._mount_and_scroll(InterruptMessage()) + + self._interrupt_requested = False + + async def _show_help(self) -> None: + help_text = self.commands.get_help_text() + await self._mount_and_scroll(UserCommandMessage(help_text)) + + async def _show_status(self) -> None: + if self.agent is None: + await self._mount_and_scroll( + ErrorMessage( + "Agent not initialized yet. Send a message first.", + collapsed=self._tools_collapsed, + ) + ) + return + + stats = self.agent.stats + status_text = f"""## Agent Statistics + +- **Steps**: {stats.steps:,} +- **Session Prompt Tokens**: {stats.session_prompt_tokens:,} +- **Session Completion Tokens**: {stats.session_completion_tokens:,} +- **Session Total LLM Tokens**: {stats.session_total_llm_tokens:,} +- **Last Turn Tokens**: {stats.last_turn_total_tokens:,} +- **Cost**: ${stats.session_cost:.4f} +""" + await self._mount_and_scroll(UserCommandMessage(status_text)) + + async def _show_config(self) -> None: + """Switch to the configuration app in the bottom panel.""" + if self._current_bottom_app == BottomApp.Config: + return + await self._switch_to_config_app() + + async def _reload_config(self) -> None: + try: + new_config = VibeConfig.load() + + if self.agent: + await self.agent.reload_with_initial_messages(config=new_config) + + self.config = new_config + if self._context_progress: + if self.config.auto_compact_threshold > 0: + current_tokens = ( + self.agent.stats.context_tokens if self.agent else 0 + ) + self._context_progress.tokens = 
TokenState( + max_tokens=self.config.auto_compact_threshold, + current_tokens=current_tokens, + ) + else: + self._context_progress.tokens = TokenState() + + await self._mount_and_scroll(UserCommandMessage("Configuration reloaded.")) + except Exception as e: + await self._mount_and_scroll( + ErrorMessage( + f"Failed to reload config: {e}", collapsed=self._tools_collapsed + ) + ) + + async def _clear_history(self) -> None: + if self.agent is None: + await self._mount_and_scroll( + ErrorMessage( + "No conversation history to clear yet.", + collapsed=self._tools_collapsed, + ) + ) + return + + if not self.agent: + return + + try: + await self.agent.clear_history() + await self._finalize_current_streaming_message() + messages_area = self.query_one("#messages") + await messages_area.remove_children() + todo_area = self.query_one("#todo-area") + await todo_area.remove_children() + + if self._context_progress and self.agent: + current_state = self._context_progress.tokens + self._context_progress.tokens = TokenState( + max_tokens=current_state.max_tokens, + current_tokens=self.agent.stats.context_tokens, + ) + await self._mount_and_scroll( + UserCommandMessage("Conversation history cleared!") + ) + chat = self.query_one("#chat", VerticalScroll) + chat.scroll_home(animate=False) + + except Exception as e: + await self._mount_and_scroll( + ErrorMessage( + f"Failed to clear history: {e}", collapsed=self._tools_collapsed + ) + ) + + async def _show_log_path(self) -> None: + if self.agent is None: + await self._mount_and_scroll( + ErrorMessage( + "No log file created yet. 
Send a message first.", + collapsed=self._tools_collapsed, + ) + ) + return + + if not self.agent.interaction_logger.enabled: + await self._mount_and_scroll( + ErrorMessage( + "Session logging is disabled in configuration.", + collapsed=self._tools_collapsed, + ) + ) + return + + try: + log_path = str(self.agent.interaction_logger.filepath) + await self._mount_and_scroll( + UserCommandMessage( + f"## Current Log File Path\n\n`{log_path}`\n\nYou can send this file to share your interaction." + ) + ) + except Exception as e: + await self._mount_and_scroll( + ErrorMessage( + f"Failed to get log path: {e}", collapsed=self._tools_collapsed + ) + ) + + async def _compact_history(self) -> None: + if self._agent_running: + await self._mount_and_scroll( + ErrorMessage( + "Cannot compact while agent is processing. Please wait.", + collapsed=self._tools_collapsed, + ) + ) + return + + if self.agent is None: + await self._mount_and_scroll( + ErrorMessage( + "No conversation history to compact yet.", + collapsed=self._tools_collapsed, + ) + ) + return + + if len(self.agent.messages) <= 1: + await self._mount_and_scroll( + ErrorMessage( + "No conversation history to compact yet.", + collapsed=self._tools_collapsed, + ) + ) + return + + if not self.agent or not self.event_handler: + return + + old_tokens = self.agent.stats.context_tokens + compact_msg = CompactMessage() + self.event_handler.current_compact = compact_msg + await self._mount_and_scroll(compact_msg) + + try: + await self.agent.compact() + new_tokens = self.agent.stats.context_tokens + compact_msg.set_complete(old_tokens=old_tokens, new_tokens=new_tokens) + self.event_handler.current_compact = None + + if self._context_progress: + current_state = self._context_progress.tokens + self._context_progress.tokens = TokenState( + max_tokens=current_state.max_tokens, current_tokens=new_tokens + ) + except Exception as e: + compact_msg.set_error(str(e)) + self.event_handler.current_compact = None + + async def _exit_app(self) 
async def _switch_to_config_app(self) -> None:
    """Replace the chat input with the configuration panel.

    No-op if the config panel is already the active bottom app.
    """
    if self._current_bottom_app == BottomApp.Config:
        return

    bottom_container = self.query_one("#bottom-app-container")
    await self._mount_and_scroll(UserCommandMessage("Configuration opened..."))

    # Remove the chat input if it is currently mounted; it may already be
    # gone, in which case the query raises and we simply move on.
    try:
        await self.query_one(ChatInputContainer).remove()
    except Exception:
        pass

    # The auto-approve mode indicator only makes sense next to the input.
    if self._mode_indicator:
        self._mode_indicator.display = False

    config_app = ConfigApp(self.config)
    await bottom_container.mount(config_app)
    self._current_bottom_app = BottomApp.Config

    # Focus once the widget is actually laid out.
    self.call_after_refresh(config_app.focus)
+ pass + + chat_input_container = ChatInputContainer( + history_file=self.history_file, + command_registry=self.commands, + id="input-container", + show_warning=self.auto_approve, + ) + await bottom_container.mount(chat_input_container) + self._chat_input_container = chat_input_container + + self._current_bottom_app = BottomApp.Input + + self.call_after_refresh(chat_input_container.focus_input) + + def _focus_current_bottom_app(self) -> None: + try: + match self._current_bottom_app: + case BottomApp.Input: + self.query_one(ChatInputContainer).focus_input() + case BottomApp.Config: + self.query_one(ConfigApp).focus() + case BottomApp.Approval: + self.query_one(ApprovalApp).focus() + case app: + assert_never(app) + except Exception: + pass + + def action_interrupt(self) -> None: + if self._current_bottom_app == BottomApp.Config: + try: + config_app = self.query_one(ConfigApp) + config_app.action_close() + except Exception: + pass + return + + if self._current_bottom_app == BottomApp.Approval: + try: + approval_app = self.query_one(ApprovalApp) + approval_app.action_reject() + except Exception: + pass + return + + has_pending_user_message = any( + msg.has_class("pending") for msg in self.query(UserMessage) + ) + + interrupt_needed = self._agent_running or ( + self._agent_init_task + and not self._agent_init_task.done() + and has_pending_user_message + ) + + if interrupt_needed: + self.run_worker(self._interrupt_agent(), exclusive=False) + + self._scroll_to_bottom() + self._focus_current_bottom_app() + + async def action_toggle_tool(self) -> None: + if not self.event_handler: + return + + self._tools_collapsed = not self._tools_collapsed + + non_todo_results = [ + result + for result in self.event_handler.tool_results + if result.event.tool_name != "todo" + ] + + for result in non_todo_results: + result.collapsed = self._tools_collapsed + await result.render_result() + + try: + error_messages = self.query(ErrorMessage) + for error_msg in error_messages: + 
def action_cycle_mode(self) -> None:
    """Toggle auto-approve mode. Only valid while the chat input is active."""
    if self._current_bottom_app != BottomApp.Input:
        return

    self.auto_approve = not self.auto_approve
    new_mode = self.auto_approve

    # Keep the UI affordances in sync with the new mode.
    if self._mode_indicator:
        self._mode_indicator.set_auto_approve(new_mode)
    if self._chat_input_container:
        self._chat_input_container.set_show_warning(new_mode)

    if self.agent:
        self.agent.auto_approve = new_mode
        # In auto-approve mode no callback is consulted; otherwise route
        # approvals back through the UI.
        self.agent.approval_callback = (
            None if new_mode else self._approval_callback
        )

    self._focus_current_bottom_app()
+ ) + await self._mount_and_scroll(UserCommandMessage(warning)) + + async def _finalize_current_streaming_message(self) -> None: + if self._current_streaming_message is None: + return + + await self._current_streaming_message.stop_stream() + self._current_streaming_message = None + + async def _mount_and_scroll(self, widget: Widget) -> None: + messages_area = self.query_one("#messages") + chat = self.query_one("#chat", VerticalScroll) + was_at_bottom = self._is_scrolled_to_bottom(chat) + + if isinstance(widget, AssistantMessage): + if self._current_streaming_message is not None: + content = widget._content or "" + if content: + await self._current_streaming_message.append_content(content) + else: + self._current_streaming_message = widget + await messages_area.mount(widget) + await widget.write_initial_content() + else: + await self._finalize_current_streaming_message() + await messages_area.mount(widget) + + is_tool_message = isinstance(widget, (ToolCallMessage, ToolResultMessage)) + + if not is_tool_message: + self.call_after_refresh(self._scroll_to_bottom) + + if was_at_bottom: + self.call_after_refresh(self._anchor_if_scrollable) + + def _is_scrolled_to_bottom(self, scroll_view: VerticalScroll) -> bool: + try: + threshold = 3 + return scroll_view.scroll_y >= (scroll_view.max_scroll_y - threshold) + except Exception: + return True + + def _scroll_to_bottom(self) -> None: + try: + chat = self.query_one("#chat") + chat.scroll_end(animate=False) + except Exception: + pass + + def _scroll_to_bottom_deferred(self) -> None: + self.call_after_refresh(self._scroll_to_bottom) + + def _anchor_if_scrollable(self) -> None: + try: + chat = self.query_one("#chat", VerticalScroll) + if chat.max_scroll_y == 0: + return + chat.anchor() + except Exception: + pass + + def _schedule_update_notification(self) -> None: + if ( + self._version_update_notifier is None + or self._update_notification_task + or not self._is_update_check_enabled + ): + return + + 
def _display_update_notification(self, update: VersionUpdate) -> None:
    """Show a one-shot toast telling the user a newer release exists.

    Guarded by ``_update_notification_shown`` so the toast appears at most
    once per session.
    """
    if self._update_notification_shown:
        return

    body = (
        f"{self._current_version} => {update.latest_version}\n"
        'Run "uv tool upgrade mistral-vibe" to update'
    )
    self.notify(body, title="Update available", severity="information", timeout=10)
    self._update_notification_shown = True
@@ -0,0 +1,681 @@ +Screen { + background: $background; +} + +#chat { + height: 1fr; + width: 100%; + background: $background; + padding: 0 2 0 2; +} + +#loading-area { + height: auto; + width: 100%; + background: $background; + padding: 1 2 0 2; + layout: horizontal; + align: left middle; +} + +#loading-area-content { + width: 1fr; + height: auto; + align: left middle; +} + +#todo-area { + height: auto; + max-height: 8; + width: 100%; + background: $background; + margin: 0; + padding: 0; + overflow-y: auto; + text-align: left; + margin-top: 0; +} + +#bottom-app-container { + height: auto; + width: 100%; + background: $background; +} + +#bottom-bar { + height: auto; + width: 100%; + background: $background; + padding: 0 2 1 2; + align: left middle; + layout: horizontal; +} + +#spacer { + width: 1fr; + height: auto; + background: transparent; +} + +#messages { + width: 100%; + height: auto; + text-align: left; + margin-top: 1; + margin-bottom: 0; +} + +#input-container { + height: auto; + width: 100%; + background: $background; + padding: 0; + margin: 0 2 1 2; +} + +#completion-popup { + width: 100%; + padding: 1 1 1 1; + color: $text; +} + +#input-box { + height: auto; + width: 100%; + background: $background; + border: round $foreground-muted; + padding: 0 1; + + &.border-warning { + border: round $warning; + } +} + +#input-body { + height: auto; +} + +#prompt { + width: auto; + background: transparent; + color: $primary; + text-style: bold; + padding: 0 1 0 0; +} + +#input { + width: 1fr; + height: auto; + max-height: 16; + background: transparent; + color: $text; + border: none; + padding: 0; +} + +ToastRack { + align: left bottom; + padding: 0 2; + margin: 0 2 6 2; +} + +Markdown MarkdownFence { + overflow-x: auto; + scrollbar-size-horizontal: 1; + max-width: 95%; +} + +.user-message { + margin-top: 1; + width: 100%; + height: auto; + + &:first-child { + margin-top: 0; + } + + &.pending { + .user-message-prompt, + .user-message-content { + opacity: 0.7; + 
text-style: italic; + } + } +} + +.user-message-container { + width: 100%; + height: auto; +} + +.user-message-prompt { + width: auto; + height: auto; + color: $primary; + text-style: bold; +} + +.user-message-content { + width: 1fr; + height: auto; + color: $foreground; + text-style: bold; +} + +.assistant-message { + margin-top: 1; + width: 100%; + height: auto; +} + +.assistant-message-container { + width: 100%; + height: auto; + align: left top; +} + +.assistant-message-dot { + width: auto; + height: auto; + color: $foreground; +} + +.assistant-message-content { + width: 1fr; + height: auto; + padding: 0; + + Markdown { + width: 100%; + height: auto; + padding: 0; + margin: 0; + } +} + +.interrupt-message { + margin-top: 0; + margin-bottom: 0; + margin-left: 2; + height: auto; + padding: 1 2; + background: $warning 10%; + color: $text-warning; +} + +.error-message { + margin-top: 1; + height: auto; + padding: 1 2; + background: $error 10%; + color: $error; + text-style: bold; +} + +.bash-output-message { + margin-top: 1; + width: 100%; + height: auto; +} + +.bash-output-container { + width: 100%; + height: auto; + padding: 1 2; + background: $surface; +} + +.bash-cwd-line, +.bash-command-line { + width: 100%; + height: auto; + margin-bottom: 1; + align: left middle; +} + +.bash-cwd { + width: auto; + color: $text-muted; +} + +.bash-cwd-spacer, +.bash-command-spacer { + width: 1fr; +} + +.bash-chevron { + width: auto; + color: $primary; + text-style: bold; +} + +.bash-command { + width: auto; + color: $foreground; +} + +.bash-output { + width: 100%; + color: $foreground; +} + +.bash-exit-success { + width: auto; + color: $text-success; +} + +.bash-exit-failure { + width: auto; + color: $text-error; +} + +.bash-exit-code { + width: auto; + color: $text-muted; +} + +.unknown-event { + height: auto; + color: $text-muted; +} + +BlinkingMessage { + width: 100%; + height: auto; + + Horizontal { + width: 100%; + height: auto; + } +} + +.blink-dot { + width: auto; + 
height: auto; + color: $foreground; + + &.success { + color: $text-success; + } + + &.error { + color: $text-error; + } +} + +.blink-text { + width: 1fr; + height: auto; + color: $foreground; +} + +.compact-message { + width: 100%; + height: auto; + margin-top: 1; +} + +.tool-call { + width: 100%; + height: auto; + margin-top: 1; +} + +.tool-result { + width: 100%; + height: auto; + margin-top: 0; + margin-left: 2; + padding: 1 2; + background: $surface; + color: $foreground; + + &.error-text { + background: $error 10%; + color: $text-error; + } + + &.warning-text { + background: $warning 10%; + color: $text-warning; + } +} + +.tool-call-widget { + width: 100%; + height: auto; + color: $foreground; +} + +.tool-call-detail { + height: auto; + color: $text-muted; +} + +.tool-result-widget { + width: 100%; + height: auto; + color: $foreground; + + Static { + height: auto; + } +} + +.tool-result-detail { + height: auto; + margin-top: 0; + color: $text-muted; +} + +.tool-result-error { + color: $text-error; +} + +.tool-result-warning { + color: $text-warning; +} + +.diff-header { + height: auto; + color: $text-muted; + text-style: bold; +} + +.diff-removed { + height: auto; + color: $text-error; +} + +.diff-added { + height: auto; + color: $text-success; +} + +.diff-range { + height: auto; + color: $primary; +} + +.diff-context { + height: auto; + color: $text-muted; +} + +.todo-empty { + height: auto; + color: $text-muted; +} + +.todo-pending { + height: auto; + color: $foreground; +} + +.todo-in_progress { + height: auto; + color: $warning; +} + +.todo-completed { + height: auto; + color: $success; +} + +.todo-cancelled { + height: auto; + color: $text-muted; +} + +#todo-area .tool-result { + margin-left: 0; + background: $surface; +} + +.loading-widget { + width: 100%; + height: auto; +} + +.loading-container { + width: auto; + height: auto; +} + +.loading-star { + width: auto; + height: auto; + color: $warning; +} + +.loading-status { + width: auto; + height: auto; 
+} + +.loading-char { + width: auto; + height: auto; +} + +.loading-ellipsis { + width: auto; + height: auto; + color: $text-muted; +} + +.loading-hint { + width: auto; + height: auto; + color: $foreground; +} + +WelcomeBanner { + width: 100%; + height: auto; + border: round $surface; + border-title-align: center; + text-align: center; + content-align: center middle; + padding: 2 4; + margin: 1 1 0 1; + color: $foreground; + + .muted { + color: $text-muted; + } +} + +#config-app { + width: 100%; + height: auto; + background: $background; + border: round $foreground-muted; + padding: 0 1; + margin: 0 2 1 2; +} + +#config-content { + width: 100%; + height: auto; +} + +.settings-title { + height: auto; + text-style: bold; + color: $primary; +} + +.settings-option { + height: auto; + color: $foreground; +} + +.settings-cursor-selected { + color: $primary; + text-style: bold; +} + +.settings-label-selected { + color: $text; + text-style: bold; +} + +.settings-value-toggle-on-selected { + color: $text-success; + text-style: bold; +} + +.settings-value-toggle-on-unselected { + color: $success; +} + +.settings-value-toggle-off { + color: $text-muted; +} + +.settings-value-cycle-selected { + color: $primary; + text-style: bold; +} + +.settings-value-cycle-unselected { + color: $primary; +} + +.settings-help { + height: auto; + color: $text-muted; +} + +#approval-app { + width: 100%; + height: auto; + max-height: 16; + background: $background; + border: round $foreground-muted; + padding: 0 1; + margin: 0 2 1 2; +} + +#approval-content { + width: 100%; + height: auto; +} + +.approval-tool-info-scroll { + width: 100%; + height: auto; + max-height: 8; +} + +.approval-title { + height: auto; + text-style: bold; + color: $warning; +} + +.approval-tool-info-container { + width: 100%; + height: auto; +} + +.tool-approval-widget { + width: 100%; + height: auto; + + Static { + width: 100%; + height: auto; + } + + Vertical { + width: 100%; + height: auto; + } +} + +.approval-option { 
+ height: auto; + color: $foreground; +} + +.approval-cursor-selected { + &.approval-option-yes { + color: $text-success; + text-style: bold; + } + + &.approval-option-no { + color: $text-error; + text-style: bold; + } +} + +.approval-option-selected { + &.approval-option-yes { + color: $success; + } + + &.approval-option-no { + color: $error; + } +} + +.approval-help { + height: auto; + color: $text-muted; +} + +.approval-description { + height: auto; + color: $foreground; +} + +.code-block { + height: auto; + color: $foreground; + background: $surface; + padding: 1; +} + +Horizontal { + width: 100%; + height: auto; +} + +ModeIndicator { + width: auto; + height: auto; + background: transparent; + padding: 0; + margin: 0 0 0 1; + color: $warning; + align: left middle; + + &.mode-on { + color: $warning; + } + + &.mode-off { + color: $text-muted; + } +} + +PathDisplay { + width: auto; + height: auto; + background: transparent; + padding: 0; + margin: 0; + color: $primary; +} + +ContextProgress { + width: auto; + height: auto; + background: transparent; + padding: 0; + margin: 0; + color: $foreground; +} diff --git a/vibe/cli/textual_ui/handlers/__init__.py b/vibe/cli/textual_ui/handlers/__init__.py new file mode 100644 index 0000000..4c1c9a4 --- /dev/null +++ b/vibe/cli/textual_ui/handlers/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from vibe.cli.textual_ui.handlers.event_handler import EventHandler + +__all__ = ["EventHandler"] diff --git a/vibe/cli/textual_ui/handlers/event_handler.py b/vibe/cli/textual_ui/handlers/event_handler.py new file mode 100644 index 0000000..a279ffb --- /dev/null +++ b/vibe/cli/textual_ui/handlers/event_handler.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +from collections.abc import Callable +from typing import TYPE_CHECKING + +from textual.widgets import Static + +from vibe.cli.textual_ui.widgets.compact import CompactMessage +from vibe.cli.textual_ui.widgets.messages import AssistantMessage +from 
def _sanitize_event(self, event: ToolResultEvent) -> ToolResultEvent:
    """Return a copy of *event* with tagged error/skip text reduced to its
    plain human-readable message.

    ``error`` and ``skip_reason`` may arrive wrapped in tag markup
    (``TaggedText``); the UI should only render the unwrapped message.

    The original body re-checked ``isinstance(event, ToolResultEvent)``
    even though the parameter is already typed (and the only caller matched
    on ``ToolResultEvent()``), leaving a dead ``return event`` fallthrough;
    both are removed here.
    """

    def _plain(text):
        # Unwrap tag markup, passing None/empty through unchanged.
        return TaggedText.from_string(text).message if text else None

    return ToolResultEvent(
        tool_name=event.tool_name,
        tool_class=event.tool_class,
        result=event.result,
        error=_plain(event.error),
        skipped=event.skipped,
        skip_reason=_plain(event.skip_reason),
        duration=event.duration,
        tool_call_id=event.tool_call_id,
    )
_handle_unknown_event(self, event: BaseEvent) -> None: + await self.mount_callback( + Static(str(event), markup=False, classes="unknown-event") + ) + + def stop_current_tool_call(self) -> None: + if self.current_tool_call: + self.current_tool_call.stop_blinking() + self.current_tool_call = None + + def stop_current_compact(self) -> None: + if self.current_compact: + self.current_compact.stop_blinking(success=False) + self.current_compact = None + + def get_last_tool_result(self) -> ToolResultMessage | None: + return self.tool_results[-1] if self.tool_results else None diff --git a/vibe/cli/textual_ui/renderers/__init__.py b/vibe/cli/textual_ui/renderers/__init__.py new file mode 100644 index 0000000..c0c384a --- /dev/null +++ b/vibe/cli/textual_ui/renderers/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from vibe.cli.textual_ui.renderers.tool_renderers import get_renderer + +__all__ = ["get_renderer"] diff --git a/vibe/cli/textual_ui/renderers/tool_renderers.py b/vibe/cli/textual_ui/renderers/tool_renderers.py new file mode 100644 index 0000000..01acc4b --- /dev/null +++ b/vibe/cli/textual_ui/renderers/tool_renderers.py @@ -0,0 +1,216 @@ +from __future__ import annotations + +import difflib +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from vibe.core.tools.ui import ToolResultDisplay + +from vibe.cli.textual_ui.widgets.tool_widgets import ( + BashApprovalWidget, + BashResultWidget, + GrepApprovalWidget, + GrepResultWidget, + ReadFileApprovalWidget, + ReadFileResultWidget, + SearchReplaceApprovalWidget, + SearchReplaceResultWidget, + TodoApprovalWidget, + TodoResultWidget, + ToolApprovalWidget, + ToolResultWidget, + WriteFileApprovalWidget, + WriteFileResultWidget, +) + + +class ToolRenderer: + def get_approval_widget( + self, tool_args: dict + ) -> tuple[type[ToolApprovalWidget], dict[str, Any]]: + return ToolApprovalWidget, tool_args + + def get_result_widget( + self, display: ToolResultDisplay, collapsed: bool + ) -> 
tuple[type[ToolResultWidget], dict[str, Any]]: + data = { + "success": display.success, + "message": display.message, + "details": self._clean_details(display.details), + "warnings": display.warnings, + } + return ToolResultWidget, data + + def _clean_details(self, details: dict) -> dict: + clean = {} + for key, value in details.items(): + if value is None or value in ("", []): + continue + value_str = str(value).strip().replace("\n", " ").replace("\r", "") + value_str = " ".join(value_str.split()) + if value_str: + clean[key] = value_str + return clean + + +class BashRenderer(ToolRenderer): + def get_approval_widget( + self, tool_args: dict + ) -> tuple[type[BashApprovalWidget], dict[str, Any]]: + data = { + "command": tool_args.get("command", ""), + "description": tool_args.get("description", ""), + } + return BashApprovalWidget, data + + def get_result_widget( + self, display: ToolResultDisplay, collapsed: bool + ) -> tuple[type[BashResultWidget], dict[str, Any]]: + data = { + "success": display.success, + "message": display.message, + "details": self._clean_details(display.details), + "warnings": display.warnings, + } + return BashResultWidget, data + + +class WriteFileRenderer(ToolRenderer): + def get_approval_widget( + self, tool_args: dict + ) -> tuple[type[WriteFileApprovalWidget], dict[str, Any]]: + data = { + "path": tool_args.get("path", ""), + "content": tool_args.get("content", ""), + "file_extension": tool_args.get("file_extension", "text"), + } + return WriteFileApprovalWidget, data + + def get_result_widget( + self, display: ToolResultDisplay, collapsed: bool + ) -> tuple[type[WriteFileResultWidget], dict[str, Any]]: + data = { + "success": display.success, + "message": display.message, + "path": display.details.get("path", ""), + "bytes_written": display.details.get("bytes_written"), + "content": display.details.get("content", ""), + "file_extension": display.details.get("file_extension", "text"), + } + return WriteFileResultWidget, data + + +class 
SearchReplaceRenderer(ToolRenderer): + def get_approval_widget( + self, tool_args: dict + ) -> tuple[type[SearchReplaceApprovalWidget], dict[str, Any]]: + file_path = tool_args.get("file_path", "") + content = str(tool_args.get("content", "")) + + diff_lines = self._parse_search_replace_blocks(content) + + data = {"file_path": file_path, "diff_lines": diff_lines} + return SearchReplaceApprovalWidget, data + + def get_result_widget( + self, display: ToolResultDisplay, collapsed: bool + ) -> tuple[type[SearchReplaceResultWidget], dict[str, Any]]: + diff_lines = self._parse_search_replace_blocks( + display.details.get("content", "") + ) + data = { + "success": display.success, + "message": display.message, + "diff_lines": diff_lines if not collapsed else [], + } + return SearchReplaceResultWidget, data + + def _parse_search_replace_blocks(self, content: str) -> list[str]: + if "<<<<<<< SEARCH" not in content: + return [content] + + try: + sections = content.split("<<<<<<< SEARCH") + rest = sections[1].split("=======") + search_section = rest[0].strip() + replace_part = rest[1].split(">>>>>>> REPLACE") + replace_section = replace_part[0].strip() + + search_lines = search_section.split("\n") + replace_lines = replace_section.split("\n") + + diff = difflib.unified_diff(search_lines, replace_lines, lineterm="", n=2) + return list(diff)[2:] # Skip file headers + except (IndexError, AttributeError): + return [content[:500]] + + +class TodoRenderer(ToolRenderer): + def get_approval_widget( + self, tool_args: dict + ) -> tuple[type[TodoApprovalWidget], dict[str, Any]]: + data = {"description": tool_args.get("description", "")} + return TodoApprovalWidget, data + + def get_result_widget( + self, display: ToolResultDisplay, collapsed: bool + ) -> tuple[type[TodoResultWidget], dict[str, Any]]: + data = { + "success": display.success, + "message": display.message, + "todos_by_status": display.details.get("todos_by_status", {}), + } + return TodoResultWidget, data + + +class 
def get_renderer(tool_name: str) -> ToolRenderer:
    """Instantiate the display renderer registered for *tool_name*.

    Tools without a specialized renderer fall back to the generic
    ``ToolRenderer``.
    """
    return _RENDERER_REGISTRY.get(tool_name, ToolRenderer)()
from __future__ import annotations

from typing import ClassVar

from textual import events
from textual.app import ComposeResult
from textual.binding import Binding, BindingType
from textual.containers import Container, Vertical, VerticalScroll
from textual.message import Message
from textual.widgets import Static

from vibe.cli.textual_ui.renderers import get_renderer
from vibe.core.config import VibeConfig


class ApprovalApp(Container):
    """Inline prompt asking the user to approve or reject a tool invocation.

    Shows tool-specific details (via the renderer registry) plus three
    selectable options, and reports the choice to the parent app through
    the message classes defined below.
    """

    # Keep keyboard focus pinned to this container so the bindings fire.
    can_focus = True
    can_focus_children = False

    BINDINGS: ClassVar[list[BindingType]] = [
        Binding("up", "move_up", "Up", show=False),
        Binding("down", "move_down", "Down", show=False),
        Binding("enter", "select", "Select", show=False),
        Binding("1", "select_1", "Yes", show=False),
        Binding("y", "select_1", "Yes", show=False),
        Binding("2", "select_2", "Always Tool Session", show=False),
        Binding("3", "select_3", "No", show=False),
        Binding("n", "select_3", "No", show=False),
    ]

    class ApprovalGranted(Message):
        """User approved this single tool invocation."""

        def __init__(self, tool_name: str, tool_args: dict) -> None:
            super().__init__()
            self.tool_name = tool_name
            self.tool_args = tool_args

    class ApprovalGrantedAlwaysTool(Message):
        """User approved and asked to auto-approve this tool from now on."""

        def __init__(
            self, tool_name: str, tool_args: dict, save_permanently: bool
        ) -> None:
            super().__init__()
            self.tool_name = tool_name
            self.tool_args = tool_args
            # False = auto-approval lasts only for the current session.
            self.save_permanently = save_permanently

    class ApprovalRejected(Message):
        """User rejected the tool invocation."""

        def __init__(self, tool_name: str, tool_args: dict) -> None:
            super().__init__()
            self.tool_name = tool_name
            self.tool_args = tool_args

    def __init__(
        self, tool_name: str, tool_args: dict, workdir: str, config: VibeConfig
    ) -> None:
        super().__init__(id="approval-app")
        self.tool_name = tool_name
        self.tool_args = tool_args
        self.workdir = workdir
        self.config = config
        # Index of the highlighted option: 0=yes, 1=always this session, 2=no.
        self.selected_option = 0
        self.content_container: Vertical | None = None
        self.title_widget: Static | None = None
        self.tool_info_container: Vertical | None = None
        self.option_widgets: list[Static] = []
        self.help_widget: Static | None = None

    def compose(self) -> ComposeResult:
        with Vertical(id="approval-content"):
            self.title_widget = Static(
                f"⚠ {self.tool_name} command", classes="approval-title"
            )
            yield self.title_widget

            with VerticalScroll(classes="approval-tool-info-scroll"):
                self.tool_info_container = Vertical(
                    classes="approval-tool-info-container"
                )
                yield self.tool_info_container

            yield Static("")

            # Three placeholder widgets; their text and CSS classes are
            # filled in by _update_options().
            for _ in range(3):
                widget = Static("", classes="approval-option")
                self.option_widgets.append(widget)
                yield widget

            yield Static("")

            self.help_widget = Static(
                "↑↓ navigate Enter select ESC reject", classes="approval-help"
            )
            yield self.help_widget

    async def on_mount(self) -> None:
        await self._update_tool_info()
        self._update_options()
        self.focus()

    async def _update_tool_info(self) -> None:
        """Mount the tool-specific approval widget into the info container."""
        if not self.tool_info_container:
            return

        renderer = get_renderer(self.tool_name)
        widget_class, data = renderer.get_approval_widget(self.tool_args)

        await self.tool_info_container.remove_children()
        approval_widget = widget_class(data)
        await self.tool_info_container.mount(approval_widget)

    def _update_options(self) -> None:
        """Refresh option labels, the selection cursor, and CSS classes."""
        options = [
            ("Yes", "yes"),
            (f"Yes and always allow {self.tool_name} this session", "yes"),
            ("No and tell the agent what to do instead", "no"),
        ]

        for idx, ((text, color_type), widget) in enumerate(
            zip(options, self.option_widgets, strict=True)
        ):
            is_selected = idx == self.selected_option

            cursor = "› " if is_selected else "  "
            option_text = f"{cursor}{idx + 1}. {text}"

            widget.update(option_text)

            # Reset all state classes before re-applying the current ones.
            widget.remove_class("approval-cursor-selected")
            widget.remove_class("approval-option-selected")
            widget.remove_class("approval-option-yes")
            widget.remove_class("approval-option-no")

            if is_selected:
                widget.add_class("approval-cursor-selected")
                if color_type == "yes":
                    widget.add_class("approval-option-yes")
                else:
                    widget.add_class("approval-option-no")
            else:
                widget.add_class("approval-option-selected")
                if color_type == "yes":
                    widget.add_class("approval-option-yes")
                else:
                    widget.add_class("approval-option-no")

    def action_move_up(self) -> None:
        self.selected_option = (self.selected_option - 1) % 3
        self._update_options()

    def action_move_down(self) -> None:
        self.selected_option = (self.selected_option + 1) % 3
        self._update_options()

    def action_select(self) -> None:
        self._handle_selection(self.selected_option)

    def action_select_1(self) -> None:
        self.selected_option = 0
        self._handle_selection(0)

    def action_select_2(self) -> None:
        self.selected_option = 1
        self._handle_selection(1)

    def action_select_3(self) -> None:
        self.selected_option = 2
        self._handle_selection(2)

    def action_reject(self) -> None:
        # NOTE(review): no binding for this action is visible here —
        # presumably ESC is forwarded by the parent app; confirm.
        self.selected_option = 2
        self._handle_selection(2)

    def _handle_selection(self, option: int) -> None:
        """Translate the chosen option index into an approval message."""
        match option:
            case 0:
                self.post_message(
                    self.ApprovalGranted(
                        tool_name=self.tool_name, tool_args=self.tool_args
                    )
                )
            case 1:
                self.post_message(
                    self.ApprovalGrantedAlwaysTool(
                        tool_name=self.tool_name,
                        tool_args=self.tool_args,
                        save_permanently=False,
                    )
                )
            case 2:
                self.post_message(
                    self.ApprovalRejected(
                        tool_name=self.tool_name, tool_args=self.tool_args
                    )
                )

    def on_blur(self, event: events.Blur) -> None:
        # Reclaim focus whenever it is lost so key bindings keep working.
        self.call_after_refresh(self.focus)
+++ b/vibe/cli/textual_ui/widgets/blinking_message.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from typing import Any + +from textual.app import ComposeResult +from textual.containers import Horizontal +from textual.widgets import Static + + +class BlinkingMessage(Static): + def __init__(self, initial_text: str = "", **kwargs: Any) -> None: + self.blink_state = False + self._blink_timer = None + self._is_blinking = True + self.success = True + self._initial_text = initial_text + self._dot_widget: Static | None = None + self._text_widget: Static | None = None + super().__init__(**kwargs) + + def compose(self) -> ComposeResult: + with Horizontal(): + self._dot_widget = Static("● ", classes="blink-dot") + yield self._dot_widget + self._text_widget = Static("", markup=False, classes="blink-text") + yield self._text_widget + + def on_mount(self) -> None: + self.update_display() + self._blink_timer = self.set_interval(0.5, self.toggle_blink) + + def toggle_blink(self) -> None: + if not self._is_blinking: + return + self.blink_state = not self.blink_state + self.update_display() + + def update_display(self) -> None: + if not self._dot_widget or not self._text_widget: + return + + content = self.get_content() + + if self._is_blinking: + dot = "● " if self.blink_state else "○ " + self._dot_widget.update(dot) + self._dot_widget.remove_class("success") + self._dot_widget.remove_class("error") + else: + self._dot_widget.update("● ") + if self.success: + self._dot_widget.add_class("success") + self._dot_widget.remove_class("error") + else: + self._dot_widget.add_class("error") + self._dot_widget.remove_class("success") + + self._text_widget.update(content) + + def get_content(self) -> str: + return self._initial_text + + def stop_blinking(self, success: bool = True) -> None: + self._is_blinking = False + self.blink_state = True + self.success = success + self.update_display() diff --git a/vibe/cli/textual_ui/widgets/chat_input/__init__.py 
from __future__ import annotations

from collections.abc import Callable
from pathlib import Path
from typing import Any

from textual.app import ComposeResult
from textual.containers import Horizontal
from textual.message import Message
from textual.widget import Widget
from textual.widgets import Static

from vibe.cli.history_manager import HistoryManager
from vibe.cli.textual_ui.widgets.chat_input.text_area import ChatTextArea


class ChatInputBody(Widget):
    """Prompt symbol + text area, wired to prefix-aware input history.

    NOTE(review): this class reaches into private attributes of both
    ChatTextArea (``_navigating_history``, ``_original_text``, …) and
    HistoryManager (``_current_index``, ``_entries``) — the coupling is
    deliberate but tight; keep the two classes in sync.
    """

    class Submitted(Message):
        """Posted when the user submits a non-empty input."""

        def __init__(self, value: str) -> None:
            self.value = value
            super().__init__()

    def __init__(self, history_file: Path | None = None, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.input_widget: ChatTextArea | None = None
        self.prompt_widget: Static | None = None

        # History is optional; without a file, navigation is disabled.
        if history_file:
            self.history = HistoryManager(history_file)
        else:
            self.history = None

        # Invoked whenever completion state must be discarded.
        self._completion_reset: Callable[[], None] | None = None

    def compose(self) -> ComposeResult:
        with Horizontal():
            self.prompt_widget = Static(">", id="prompt")
            yield self.prompt_widget

            self.input_widget = ChatTextArea(placeholder="Ask anything...", id="input")
            yield self.input_widget

    def on_mount(self) -> None:
        if self.input_widget:
            self.input_widget.focus()

    def _update_prompt(self) -> None:
        """Mirror the input's leading sigil (! shell, / command) in the prompt."""
        if not self.input_widget or not self.prompt_widget:
            return

        text = self.input_widget.text
        if text.startswith("!"):
            self.prompt_widget.update("!")
        elif text.startswith("/"):
            self.prompt_widget.update("/")
        else:
            self.prompt_widget.update(">")

    def _load_history_entry(self, text: str, cursor_col: int | None = None) -> None:
        """Replace the input with a history entry and position the cursor.

        The cursor lands on the first line, at *cursor_col* if given,
        otherwise at the end of the first line.
        """
        if not self.input_widget:
            return

        # Flag the load so the Changed handler doesn't treat it as typing.
        self.input_widget._navigating_history = True
        self.input_widget.load_text(text)

        first_line = text.split("\n")[0] if text else ""
        col = cursor_col if cursor_col is not None else len(first_line)
        cursor_pos = (0, col)

        self.input_widget.move_cursor(cursor_pos)
        self.input_widget._last_cursor_col = col
        self.input_widget._cursor_pos_after_load = cursor_pos
        self.input_widget._cursor_moved_since_load = False

        self._update_prompt()
        self._notify_completion_reset()

    def on_chat_text_area_history_previous(
        self, event: ChatTextArea.HistoryPrevious
    ) -> None:
        """Step backwards through history entries matching the typed prefix."""
        if not self.history or not self.input_widget:
            return

        # -1 means "not navigating yet": remember what the user had typed.
        if self.history._current_index == -1:
            self.input_widget._original_text = self.input_widget.text

        # Prefix changed mid-navigation: restart from the newest entry.
        if (
            self.history._current_index != -1
            and self.input_widget._last_used_prefix is not None
            and self.input_widget._last_used_prefix != event.prefix
        ):
            self.history.reset_navigation()

        self.input_widget._last_used_prefix = event.prefix
        previous = self.history.get_previous(
            self.input_widget._original_text, prefix=event.prefix
        )

        if previous is not None:
            self._load_history_entry(previous)

    def on_chat_text_area_history_next(self, event: ChatTextArea.HistoryNext) -> None:
        """Step forwards through history, eventually restoring the draft."""
        if not self.history or not self.input_widget:
            return

        # Not navigating: "down" has nothing to restore.
        if self.history._current_index == -1:
            return

        if (
            self.input_widget._last_used_prefix is not None
            and self.input_widget._last_used_prefix != event.prefix
        ):
            self.history.reset_navigation()

        self.input_widget._last_used_prefix = event.prefix

        # Only advance when a newer matching entry (or the original draft)
        # exists to land on.
        has_next = any(
            self.history._entries[i].startswith(event.prefix)
            for i in range(self.history._current_index + 1, len(self.history._entries))
        )

        original_matches = self.input_widget._original_text.startswith(event.prefix)

        if has_next or original_matches:
            next_entry = self.history.get_next(prefix=event.prefix)
            if next_entry is not None:
                # When navigation just ended (back at the draft), put the
                # cursor right after the prefix the user had typed.
                cursor_col = (
                    len(event.prefix) if self.history._current_index == -1 else None
                )
                self._load_history_entry(next_entry, cursor_col=cursor_col)

    def on_chat_text_area_history_reset(self, event: ChatTextArea.HistoryReset) -> None:
        """Drop all navigation state after the user edits the text."""
        if self.history:
            self.history.reset_navigation()
        if self.input_widget:
            self.input_widget._original_text = ""
            self.input_widget._cursor_pos_after_load = None
            self.input_widget._cursor_moved_since_load = False

    def on_text_area_changed(self, event: ChatTextArea.Changed) -> None:
        self._update_prompt()

    def on_chat_text_area_submitted(self, event: ChatTextArea.Submitted) -> None:
        """Record the submission in history, clear the input, and re-post."""
        event.stop()

        if not self.input_widget:
            return

        value = event.value.strip()
        if value:
            if self.history:
                self.history.add(value)
                self.history.reset_navigation()

            self.input_widget.clear_text()
            self._update_prompt()

            self._notify_completion_reset()

            self.post_message(self.Submitted(value))

    @property
    def value(self) -> str:
        """Current input text ("" before compose())."""
        return self.input_widget.text if self.input_widget else ""

    @value.setter
    def value(self, text: str) -> None:
        if self.input_widget:
            self.input_widget.load_text(text)
            self._update_prompt()

    def focus_input(self) -> None:
        if self.input_widget:
            self.input_widget.focus()

    def set_completion_reset_callback(
        self, callback: Callable[[], None] | None
    ) -> None:
        self._completion_reset = callback

    def _notify_completion_reset(self) -> None:
        if self._completion_reset:
            self._completion_reset()

    def replace_input(self, text: str, cursor_offset: int | None = None) -> None:
        """Overwrite the input (e.g. after completion) and reset history."""
        if not self.input_widget:
            return

        self.input_widget.load_text(text)
        self.input_widget.reset_history_state()
        self._update_prompt()

        if cursor_offset is not None:
            self.input_widget.set_cursor_offset(max(0, min(cursor_offset, len(text))))
class MultiCompletionManager:
    """Dispatches completion events to the first controller able to handle them.

    At most one controller is "active" at a time; switching (or losing)
    the active controller resets the outgoing one.
    """

    def __init__(self, controllers: Sequence[CompletionController]) -> None:
        self._controllers = list(controllers)
        self._active: CompletionController | None = None

    def on_text_changed(self, text: str, cursor_index: int) -> None:
        """Re-select the active controller and forward the text change."""
        # First controller claiming the input wins; None if nobody does.
        candidate = next(
            (c for c in self._controllers if c.can_handle(text, cursor_index)),
            None,
        )

        # Reset the previous controller on any hand-over or deactivation.
        if candidate is not self._active and self._active is not None:
            self._active.reset()

        self._active = candidate
        if candidate is not None:
            candidate.on_text_changed(text, cursor_index)

    def on_key(
        self, event: events.Key, text: str, cursor_index: int
    ) -> CompletionResult:
        """Forward a key press to the active controller, if any."""
        if self._active is None:
            return CompletionResult.IGNORED
        return self._active.on_key(event, text, cursor_index)

    def reset(self) -> None:
        """Deactivate and reset the active controller."""
        active, self._active = self._active, None
        if active is not None:
            active.reset()
class CompletionPopup(Static):
    """Overlay listing completion suggestions; hidden when there are none."""

    def __init__(self, **kwargs: Any) -> None:
        super().__init__("", id="completion-popup", **kwargs)
        self.styles.display = "none"
        self.can_focus = False

    def update_suggestions(
        self, suggestions: list[tuple[str, str]], selected: int
    ) -> None:
        """Render (label, description) pairs, highlighting *selected*."""
        if not suggestions:
            self.hide()
            return

        text = Text()
        for idx, (label, description) in enumerate(suggestions):
            if idx:
                text.append("\n")

            # The selected row is rendered reversed; others dimmed.
            label_style = "bold reverse" if idx == selected else "bold"
            description_style = "italic" if idx == selected else "dim"

            text.append(label, style=label_style)
            if description:
                text.append(" ")
                text.append(description, style=description_style)

        self.update(text)
        self.show()

    def hide(self) -> None:
        self.update("")
        self.styles.display = "none"

    def show(self) -> None:
        self.styles.display = "block"


class ChatInputContainer(Vertical):
    """Chat input box plus its completion popup and warning border.

    Owns the completion machinery (slash commands and @-paths) and
    re-posts ChatInputBody submissions as its own Submitted message.
    """

    ID_INPUT_BOX = "input-box"
    BORDER_WARNING_CLASS = "border-warning"

    class Submitted(Message):
        def __init__(self, value: str) -> None:
            self.value = value
            super().__init__()

    def __init__(
        self,
        history_file: Path | None = None,
        command_registry: CommandRegistry | None = None,
        show_warning: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self._history_file = history_file
        self._command_registry = command_registry or CommandRegistry()
        self._show_warning = show_warning

        # One (alias, description) entry per command alias.
        command_entries = [
            (alias, command.description)
            for command in self._command_registry.commands.values()
            for alias in sorted(command.aliases)
        ]

        self._completion_manager = MultiCompletionManager([
            SlashCommandController(CommandCompleter(command_entries), self),
            PathCompletionController(PathCompleter(), self),
        ])
        self._completion_popup: CompletionPopup | None = None
        self._body: ChatInputBody | None = None

    def compose(self) -> ComposeResult:
        self._completion_popup = CompletionPopup()
        yield self._completion_popup

        with Vertical(
            id=self.ID_INPUT_BOX, classes="border-warning" if self._show_warning else ""
        ):
            self._body = ChatInputBody(history_file=self._history_file, id="input-body")

            yield self._body

    def on_mount(self) -> None:
        if not self._body:
            return

        # Wire the body and its text area into the completion machinery.
        self._body.set_completion_reset_callback(self._completion_manager.reset)
        if self._body.input_widget:
            self._body.input_widget.set_completion_manager(self._completion_manager)
        self._body.focus_input()

    @property
    def input_widget(self) -> ChatTextArea | None:
        return self._body.input_widget if self._body else None

    @property
    def value(self) -> str:
        if not self._body:
            return ""
        return self._body.value

    @value.setter
    def value(self, text: str) -> None:
        if not self._body:
            return
        self._body.value = text
        widget = self._body.input_widget
        if widget:
            # Programmatic updates must also refresh completion state.
            self._completion_manager.on_text_changed(
                widget.text, widget.get_cursor_offset()
            )

    def focus_input(self) -> None:
        if self._body:
            self._body.focus_input()

    def render_completion_suggestions(
        self, suggestions: list[tuple[str, str]], selected_index: int
    ) -> None:
        if self._completion_popup:
            self._completion_popup.update_suggestions(suggestions, selected_index)

    def clear_completion_suggestions(self) -> None:
        if self._completion_popup:
            self._completion_popup.hide()

    def _format_insertion(self, replacement: str, suffix: str) -> str:
        """Format the insertion text with appropriate spacing.

        Args:
            replacement: The text to insert
            suffix: The text that follows the insertion point

        Returns:
            The formatted insertion text with spacing if needed
        """
        if replacement.startswith("@"):
            if replacement.endswith("/"):
                return replacement
            # For @-prefixed completions, add space unless suffix starts with whitespace
            return replacement + (" " if not suffix or not suffix[0].isspace() else "")

        # For other completions, add space only if suffix exists and doesn't start with whitespace
        return replacement + (" " if suffix and not suffix[0].isspace() else "")

    def replace_completion_range(self, start: int, end: int, replacement: str) -> None:
        """Splice *replacement* over [start, end) of the input text."""
        widget = self.input_widget
        if not widget or not self._body:
            return

        text = widget.text
        # Clamp the range to the current text so a stale completion
        # request cannot slice out of bounds.
        start = max(0, min(start, len(text)))
        end = max(start, min(end, len(text)))

        prefix = text[:start]
        suffix = text[end:]
        insertion = self._format_insertion(replacement, suffix)
        new_text = f"{prefix}{insertion}{suffix}"

        self._body.replace_input(new_text, cursor_offset=start + len(insertion))

    def on_chat_input_body_submitted(self, event: ChatInputBody.Submitted) -> None:
        event.stop()
        self.post_message(self.Submitted(event.value))

    def set_show_warning(self, show_warning: bool) -> None:
        """Toggle the warning border on the input box."""
        self._show_warning = show_warning

        input_box = self.get_widget_by_id(self.ID_INPUT_BOX)
        if show_warning:
            input_box.add_class(self.BORDER_WARNING_CLASS)
        else:
            input_box.remove_class(self.BORDER_WARNING_CLASS)
class ChatTextArea(TextArea):
    """TextArea specialised for chat input.

    Adds: Enter-to-submit (Shift+Enter / Ctrl+J inserts a newline),
    prefix-filtered history navigation with Up/Down, and completion
    handling delegated to a MultiCompletionManager.
    """

    BINDINGS: ClassVar[list[Binding]] = [
        Binding(
            "shift+enter,ctrl+j",
            "insert_newline",
            "New Line",
            show=False,
            priority=True,
        )
    ]

    class Submitted(Message):
        """Posted with the stripped text when the user presses Enter."""

        def __init__(self, value: str) -> None:
            self.value = value
            super().__init__()

    class HistoryPrevious(Message):
        """Request the previous history entry matching *prefix*."""

        def __init__(self, prefix: str) -> None:
            self.prefix = prefix
            super().__init__()

    class HistoryNext(Message):
        """Request the next history entry matching *prefix*."""

        def __init__(self, prefix: str) -> None:
            self.prefix = prefix
            super().__init__()

    class HistoryReset(Message):
        """Message sent when history navigation should be reset."""

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Prefix captured when history navigation starts; None = inactive.
        self._history_prefix: str | None = None
        self._last_text = ""
        # True while a history entry is being loaded programmatically, so
        # the Changed handler does not treat it as user typing.
        self._navigating_history = False
        self._last_cursor_col: int = 0
        self._last_used_prefix: str | None = None
        # The user's draft, restored when navigating past the newest entry.
        self._original_text: str = ""
        self._cursor_pos_after_load: tuple[int, int] | None = None
        self._cursor_moved_since_load: bool = False
        self._completion_manager: MultiCompletionManager | None = None

    def on_blur(self, event: events.Blur) -> None:
        # Keep focus pinned on the input.
        self.call_after_refresh(self.focus)

    def on_click(self, event: events.Click) -> None:
        self._mark_cursor_moved_if_needed()

    def action_insert_newline(self) -> None:
        self.insert("\n")

    def on_text_area_changed(self, event: TextArea.Changed) -> None:
        """Distinguish real edits (reset history) from programmatic loads."""
        if not self._navigating_history and self.text != self._last_text:
            self._reset_prefix()
            self._original_text = ""
            self._cursor_pos_after_load = None
            self._cursor_moved_since_load = False
            self.post_message(self.HistoryReset())
        self._last_text = self.text
        was_navigating_history = self._navigating_history
        self._navigating_history = False

        # Completions only react to genuine user edits.
        if self._completion_manager and not was_navigating_history:
            self._completion_manager.on_text_changed(
                self.text, self.get_cursor_offset()
            )

    def _reset_prefix(self) -> None:
        self._history_prefix = None
        self._last_used_prefix = None

    def _mark_cursor_moved_if_needed(self) -> None:
        """End prefix-locked navigation once the cursor leaves its loaded spot."""
        if (
            self._cursor_pos_after_load is not None
            and not self._cursor_moved_since_load
            and self.cursor_location != self._cursor_pos_after_load
        ):
            self._cursor_moved_since_load = True
            self._reset_prefix()

    def _get_prefix_up_to_cursor(self) -> str:
        """Text on the cursor's line, up to the cursor column."""
        cursor_row, cursor_col = self.cursor_location
        lines = self.text.split("\n")
        if cursor_row < len(lines):
            return lines[cursor_row][:cursor_col]
        return ""

    def _handle_history_up(self) -> bool:
        """Intercept Up on the first line; returns True when handled."""
        cursor_row, cursor_col = self.cursor_location
        if cursor_row == 0:
            # Cursor moved within the line since the last step: new prefix.
            if self._history_prefix is not None and cursor_col != self._last_cursor_col:
                self._reset_prefix()
                self._last_cursor_col = 0

            if self._history_prefix is None:
                self._history_prefix = self._get_prefix_up_to_cursor()

            self._navigating_history = True
            self.post_message(self.HistoryPrevious(self._history_prefix))
            return True
        return False

    def _handle_history_down(self) -> bool:
        """Intercept Down on the last line (or unmoved first line)."""
        cursor_row, cursor_col = self.cursor_location
        total_lines = self.text.count("\n") + 1

        on_first_line_unmoved = cursor_row == 0 and not self._cursor_moved_since_load
        on_last_line = cursor_row == total_lines - 1

        should_intercept = (
            on_first_line_unmoved and self._history_prefix is not None
        ) or on_last_line

        if not should_intercept:
            return False

        if self._history_prefix is not None and cursor_col != self._last_cursor_col:
            self._reset_prefix()
            self._last_cursor_col = 0

        if self._history_prefix is None:
            self._history_prefix = self._get_prefix_up_to_cursor()

        self._navigating_history = True
        self.post_message(self.HistoryNext(self._history_prefix))
        return True

    async def _on_key(self, event: events.Key) -> None:
        """Key pipeline: completions first, then submit, then history."""
        self._mark_cursor_moved_if_needed()

        manager = self._completion_manager
        if manager:
            match manager.on_key(event, self.text, self.get_cursor_offset()):
                case CompletionResult.HANDLED:
                    event.prevent_default()
                    event.stop()
                    return
                case CompletionResult.SUBMIT:
                    event.prevent_default()
                    event.stop()
                    value = self.text.strip()
                    if value:
                        self._reset_prefix()
                        self.post_message(self.Submitted(value))
                    return

        if event.key == "enter":
            event.prevent_default()
            event.stop()
            value = self.text.strip()
            if value:
                self._reset_prefix()
                self.post_message(self.Submitted(value))
            return

        if event.key == "shift+enter":
            # Handled by the binding (action_insert_newline); swallow here.
            event.prevent_default()
            event.stop()
            return

        if event.key == "up" and self._handle_history_up():
            event.prevent_default()
            event.stop()
            return

        if event.key == "down" and self._handle_history_down():
            event.prevent_default()
            event.stop()
            return

        await super()._on_key(event)
        self._mark_cursor_moved_if_needed()

    def set_completion_manager(self, manager: MultiCompletionManager | None) -> None:
        self._completion_manager = manager
        if self._completion_manager:
            self._completion_manager.on_text_changed(
                self.text, self.get_cursor_offset()
            )

    def get_cursor_offset(self) -> int:
        """Cursor position as a flat character offset into self.text."""
        text = self.text
        row, col = self.cursor_location

        if not text:
            return 0

        lines = text.split("\n")
        row = max(0, min(row, len(lines) - 1))
        col = max(0, col)

        # +1 per preceding line accounts for the newline character.
        offset = sum(len(lines[i]) + 1 for i in range(row))
        return offset + min(col, len(lines[row]))

    def set_cursor_offset(self, offset: int) -> None:
        """Move the cursor to a flat character offset (clamped to the text)."""
        text = self.text
        if offset <= 0:
            self.move_cursor((0, 0))
            return

        if offset >= len(text):
            lines = text.split("\n")
            if not lines:
                self.move_cursor((0, 0))
                return
            last_row = len(lines) - 1
            self.move_cursor((last_row, len(lines[last_row])))
            return

        remaining = offset
        lines = text.split("\n")

        for row, line in enumerate(lines):
            line_length = len(line)
            if remaining <= line_length:
                self.move_cursor((row, remaining))
                return
            remaining -= line_length + 1

        last_row = len(lines) - 1
        self.move_cursor((last_row, len(lines[last_row])))

    def reset_history_state(self) -> None:
        """Forget all navigation state; current text becomes the baseline."""
        self._reset_prefix()
        self._original_text = ""
        self._cursor_pos_after_load = None
        self._cursor_moved_since_load = False
        self._last_text = self.text

    def clear_text(self) -> None:
        self.clear()
        self.reset_history_state()
class CompactMessage(BlinkingMessage):
    """Blinking status line for conversation-history compaction."""

    def __init__(self) -> None:
        super().__init__()
        self.add_class("compact-message")
        self.old_tokens: int | None = None
        self.new_tokens: int | None = None
        self.error_message: str | None = None

    def get_content(self) -> str:
        """Message text for the current phase (running / error / done)."""
        if self._is_blinking:
            return "Compacting conversation history..."

        if self.error_message:
            return f"Error: {self.error_message}"

        if self.old_tokens is None or self.new_tokens is None:
            return "Compaction complete"

        saved = self.old_tokens - self.new_tokens
        pct = (saved / self.old_tokens * 100) if self.old_tokens > 0 else 0
        return (
            f"Compaction complete: {self.old_tokens:,} → "
            f"{self.new_tokens:,} tokens (-{pct:.1f}%)"
        )

    def set_complete(
        self, old_tokens: int | None = None, new_tokens: int | None = None
    ) -> None:
        """Record final token counts and freeze the dot as success."""
        self.old_tokens = old_tokens
        self.new_tokens = new_tokens
        self.stop_blinking(success=True)

    def set_error(self, error_message: str) -> None:
        """Record the failure and freeze the dot as error."""
        self.error_message = error_message
        self.stop_blinking(success=False)
class ConfigApp(Container):
    """In-app settings panel for cycling through model and theme choices.

    Changes are accumulated in ``self.changes`` and announced both
    per-change (SettingChanged) and on close (ConfigClosed).
    """

    # Keep keyboard focus on the panel so the bindings fire.
    can_focus = True
    can_focus_children = False

    BINDINGS: ClassVar[list[BindingType]] = [
        Binding("up", "move_up", "Up", show=False),
        Binding("down", "move_down", "Down", show=False),
        Binding("space", "toggle_setting", "Toggle", show=False),
        Binding("enter", "cycle", "Next", show=False),
    ]

    class SettingChanged(Message):
        """A single setting was cycled to a new value."""

        def __init__(self, key: str, value: str) -> None:
            super().__init__()
            self.key = key
            self.value = value

    class ConfigClosed(Message):
        """Panel closed; carries every accumulated change."""

        def __init__(self, changes: dict[str, str]) -> None:
            super().__init__()
            self.changes = changes

    def __init__(self, config: VibeConfig) -> None:
        super().__init__(id="config-app")
        self.config = config
        self.selected_index = 0
        # key -> pending new value; empty until something is toggled.
        self.changes: dict[str, str] = {}

        self.settings: list[SettingDefinition] = [
            {
                "key": "active_model",
                "label": "Model",
                "type": "cycle",
                "options": [m.alias for m in self.config.models],
                "value": self.config.active_model,
            },
            {
                "key": "textual_theme",
                "label": "Theme",
                "type": "cycle",
                "options": THEMES,
                "value": self.config.textual_theme,
            },
        ]

        self.title_widget: Static | None = None
        self.setting_widgets: list[Static] = []
        self.help_widget: Static | None = None

    def compose(self) -> ComposeResult:
        with Vertical(id="config-content"):
            self.title_widget = Static("Settings", classes="settings-title")
            yield self.title_widget

            yield Static("")

            # One placeholder row per setting; text filled by _update_display().
            for _ in self.settings:
                widget = Static("", classes="settings-option")
                self.setting_widgets.append(widget)
                yield widget

            yield Static("")

            self.help_widget = Static(
                "↑↓ navigate Space/Enter toggle ESC exit", classes="settings-help"
            )
            yield self.help_widget

    def on_mount(self) -> None:
        self._update_display()
        self.focus()

    def _update_display(self) -> None:
        """Redraw every row with its (possibly pending) value and cursor."""
        for i, (setting, widget) in enumerate(
            zip(self.settings, self.setting_widgets, strict=True)
        ):
            is_selected = i == self.selected_index
            cursor = "› " if is_selected else "  "

            label: str = setting["label"]
            # Pending change wins over the stored config value.
            value: str = self.changes.get(setting["key"], setting["value"])

            text = f"{cursor}{label}: {value}"

            widget.update(text)

            widget.remove_class("settings-cursor-selected")
            widget.remove_class("settings-value-cycle-selected")
            widget.remove_class("settings-value-cycle-unselected")

            if is_selected:
                widget.add_class("settings-value-cycle-selected")
            else:
                widget.add_class("settings-value-cycle-unselected")

    def action_move_up(self) -> None:
        self.selected_index = (self.selected_index - 1) % len(self.settings)
        self._update_display()

    def action_move_down(self) -> None:
        self.selected_index = (self.selected_index + 1) % len(self.settings)
        self._update_display()

    def action_toggle_setting(self) -> None:
        """Cycle the selected setting to its next option (wrapping)."""
        setting = self.settings[self.selected_index]
        key: str = setting["key"]
        current: str = self.changes.get(key, setting["value"])

        options: list[str] = setting["options"]
        try:
            current_idx = options.index(current)
            next_idx = (current_idx + 1) % len(options)
            new_value: str = options[next_idx]
        except (ValueError, IndexError):
            # Current value not among the options: fall back to the first.
            new_value: str = options[0] if options else current

        self.changes[key] = new_value

        self.post_message(self.SettingChanged(key=key, value=new_value))

        self._update_display()

    def action_cycle(self) -> None:
        self.action_toggle_setting()

    def action_close(self) -> None:
        # NOTE(review): no binding for this action is visible here —
        # presumably ESC is forwarded by the parent app; confirm.
        self.post_message(self.ConfigClosed(changes=self.changes.copy()))

    def on_blur(self, event: events.Blur) -> None:
        # Reclaim focus whenever it is lost so key bindings keep working.
        self.call_after_refresh(self.focus)
100, int((new_state.current_tokens / new_state.max_tokens) * 100) + ) + text = f"{percentage}% of {new_state.max_tokens // 1000}k tokens" + self.update(text) diff --git a/vibe/cli/textual_ui/widgets/loading.py b/vibe/cli/textual_ui/widgets/loading.py new file mode 100644 index 0000000..260f7c5 --- /dev/null +++ b/vibe/cli/textual_ui/widgets/loading.py @@ -0,0 +1,157 @@ +from __future__ import annotations + +from datetime import datetime +import random +from time import time +from typing import ClassVar + +from textual.app import ComposeResult +from textual.containers import Horizontal +from textual.widgets import Static + + +class LoadingWidget(Static): + BRAILLE_SPINNER = ("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏") + + TARGET_COLORS = ("#FFD800", "#FFAF00", "#FF8205", "#FA500F", "#E10500") + + EASTER_EGGS: ClassVar[list[str]] = [ + "Eating a chocolatine", + "Eating a pain au chocolat", + "Réflexion", + "Analyse", + "Contemplation", + "Synthèse", + "Reading Proust", + "Oui oui baguette", + "Counting Rs in strawberry", + "Seeding Mistral weights", + "Vibing", + "Sending good vibes", + "Petting le chat", + ] + + EASTER_EGGS_HALLOWEEN: ClassVar[list[str]] = [ + "Trick or treating", + "Carving pumpkins", + "Summoning spirits", + "Brewing potions", + "Haunting the terminal", + "Petting le chat noir", + ] + + EASTER_EGGS_DECEMBER: ClassVar[list[str]] = [ + "Wrapping presents", + "Decorating the tree", + "Drinking hot chocolate", + "Building snowmen", + "Writing holiday cards", + ] + + def __init__(self, status: str | None = None) -> None: + super().__init__(classes="loading-widget") + self.status = status or self._get_default_status() + self.gradient_offset = 0 + self.spinner_pos = 0 + self.char_widgets: list[Static] = [] + self.spinner_widget: Static | None = None + self.ellipsis_widget: Static | None = None + self.hint_widget: Static | None = None + self.start_time: float | None = None + + def _get_easter_egg(self) -> str | None: + EASTER_EGG_PROBABILITY = 0.10 
+ if random.random() < EASTER_EGG_PROBABILITY: + available_eggs = list(self.EASTER_EGGS) + + OCTOBER = 10 + HALLOWEEN_DAY = 31 + DECEMBER = 12 + now = datetime.now() + if now.month == OCTOBER and now.day == HALLOWEEN_DAY: + available_eggs.extend(self.EASTER_EGGS_HALLOWEEN) + if now.month == DECEMBER: + available_eggs.extend(self.EASTER_EGGS_DECEMBER) + + return random.choice(available_eggs) + return None + + def _get_default_status(self) -> str: + return self._get_easter_egg() or "Thinking" + + def _apply_easter_egg(self, status: str) -> str: + return self._get_easter_egg() or status + + def set_status(self, status: str) -> None: + self.status = self._apply_easter_egg(status) + self.gradient_offset = 0 + self._rebuild_chars() + + def compose(self) -> ComposeResult: + with Horizontal(classes="loading-container"): + self.spinner_widget = Static( + self.BRAILLE_SPINNER[0] + " ", classes="loading-star" + ) + yield self.spinner_widget + + with Horizontal(classes="loading-status"): + for char in self.status: + widget = Static(char, classes="loading-char") + self.char_widgets.append(widget) + yield widget + + self.ellipsis_widget = Static("… ", classes="loading-ellipsis") + yield self.ellipsis_widget + + self.hint_widget = Static("(0s esc to interrupt)", classes="loading-hint") + yield self.hint_widget + + def _rebuild_chars(self) -> None: + if not self.is_mounted: + return + + status_container = self.query_one(".loading-status", Horizontal) + + status_container.remove_children() + self.char_widgets.clear() + + for char in self.status: + widget = Static(char, classes="loading-char") + self.char_widgets.append(widget) + status_container.mount(widget) + + self.update_animation() + + def on_mount(self) -> None: + self.start_time = time() + self.update_animation() + self.set_interval(0.1, self.update_animation) + + def _get_gradient_color(self, position: int) -> str: + color_index = (position - self.gradient_offset) % len(self.TARGET_COLORS) + return 
self.TARGET_COLORS[color_index] + + def update_animation(self) -> None: + if self.spinner_widget: + spinner_char = self.BRAILLE_SPINNER[self.spinner_pos] + color_0 = self._get_gradient_color(0) + color_1 = self._get_gradient_color(1) + self.spinner_widget.update(f"[{color_0}]{spinner_char}[/][{color_1}] [/]") + self.spinner_pos = (self.spinner_pos + 1) % len(self.BRAILLE_SPINNER) + + for i, widget in enumerate(self.char_widgets): + position = 2 + i + color = self._get_gradient_color(position) + widget.update(f"[{color}]{self.status[i]}[/]") + + if self.ellipsis_widget: + ellipsis_start = 2 + len(self.status) + color_ellipsis = self._get_gradient_color(ellipsis_start) + color_space = self._get_gradient_color(ellipsis_start + 1) + self.ellipsis_widget.update(f"[{color_ellipsis}]…[/][{color_space}] [/]") + + self.gradient_offset = (self.gradient_offset + 1) % len(self.TARGET_COLORS) + + if self.hint_widget and self.start_time is not None: + elapsed = int(time() - self.start_time) + self.hint_widget.update(f"({elapsed}s esc to interrupt)") diff --git a/vibe/cli/textual_ui/widgets/messages.py b/vibe/cli/textual_ui/widgets/messages.py new file mode 100644 index 0000000..4975ec7 --- /dev/null +++ b/vibe/cli/textual_ui/widgets/messages.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from textual.app import ComposeResult +from textual.containers import Horizontal, Vertical +from textual.widgets import Markdown, Static +from textual.widgets._markdown import MarkdownStream + + +class UserMessage(Static): + def __init__(self, content: str, pending: bool = False) -> None: + super().__init__() + self.add_class("user-message") + self._content = content + self._pending = pending + + def compose(self) -> ComposeResult: + with Horizontal(classes="user-message-container"): + yield Static("> ", classes="user-message-prompt") + yield Static(self._content, markup=False, classes="user-message-content") + if self._pending: + self.add_class("pending") + + async def 
set_pending(self, pending: bool) -> None: + if pending == self._pending: + return + + self._pending = pending + + if pending: + self.add_class("pending") + return + + self.remove_class("pending") + + +class AssistantMessage(Static): + def __init__(self, content: str) -> None: + super().__init__() + self.add_class("assistant-message") + self._content = content + self._markdown: Markdown | None = None + self._stream: MarkdownStream | None = None + + def compose(self) -> ComposeResult: + with Horizontal(classes="assistant-message-container"): + yield Static("● ", classes="assistant-message-dot") + with Vertical(classes="assistant-message-content"): + markdown = Markdown("") + self._markdown = markdown + yield markdown + + def _get_markdown(self) -> Markdown: + if self._markdown is None: + self._markdown = self.query_one(Markdown) + return self._markdown + + def _ensure_stream(self) -> MarkdownStream: + if self._stream is None: + self._stream = Markdown.get_stream(self._get_markdown()) + return self._stream + + async def append_content(self, content: str) -> None: + if not content: + return + + self._content += content + stream = self._ensure_stream() + await stream.write(content) + + async def write_initial_content(self) -> None: + if self._content: + stream = self._ensure_stream() + await stream.write(self._content) + + async def stop_stream(self) -> None: + if self._stream is None: + return + + await self._stream.stop() + self._stream = None + + +class UserCommandMessage(Static): + def __init__(self, content: str) -> None: + super().__init__() + self.add_class("user-command-message") + self._content = content + + def compose(self) -> ComposeResult: + yield Markdown(self._content) + + +class InterruptMessage(Static): + def __init__(self) -> None: + super().__init__( + "Interrupted · What should Vibe do instead?", classes="interrupt-message" + ) + + +class BashOutputMessage(Static): + def __init__(self, command: str, cwd: str, output: str, exit_code: int) -> None: + 
super().__init__() + self.add_class("bash-output-message") + self._command = command + self._cwd = cwd + self._output = output + self._exit_code = exit_code + + def compose(self) -> ComposeResult: + with Vertical(classes="bash-output-container"): + with Horizontal(classes="bash-cwd-line"): + yield Static(self._cwd, markup=False, classes="bash-cwd") + yield Static("", classes="bash-cwd-spacer") + if self._exit_code == 0: + yield Static("✓", classes="bash-exit-success") + else: + yield Static("✗", classes="bash-exit-failure") + yield Static(f" ({self._exit_code})", classes="bash-exit-code") + with Horizontal(classes="bash-command-line"): + yield Static("> ", classes="bash-chevron") + yield Static(self._command, markup=False, classes="bash-command") + yield Static("", classes="bash-command-spacer") + yield Static(self._output, markup=False, classes="bash-output") + + +class ErrorMessage(Static): + def __init__(self, error: str, collapsed: bool = True) -> None: + super().__init__(classes="error-message") + self._error = error + self.collapsed = collapsed + + def compose(self) -> ComposeResult: + if self.collapsed: + yield Static("Error. (ctrl+o to expand)", markup=False) + else: + yield Static(f"Error: {self._error}", markup=False) + + def set_collapsed(self, collapsed: bool) -> None: + if self.collapsed == collapsed: + return + + self.collapsed = collapsed + self.remove_children() + + if self.collapsed: + self.mount(Static("Error. 
(ctrl+o to expand)", markup=False)) + else: + self.mount(Static(f"Error: {self._error}", markup=False)) diff --git a/vibe/cli/textual_ui/widgets/mode_indicator.py b/vibe/cli/textual_ui/widgets/mode_indicator.py new file mode 100644 index 0000000..9512e94 --- /dev/null +++ b/vibe/cli/textual_ui/widgets/mode_indicator.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from textual.widgets import Static + + +class ModeIndicator(Static): + def __init__(self, auto_approve: bool = False) -> None: + super().__init__() + self.can_focus = False + self._auto_approve = auto_approve + self._update_display() + + def _update_display(self) -> None: + if self._auto_approve: + self.update("⏵⏵ auto-approve on (shift+tab to toggle)") + self.add_class("mode-on") + self.remove_class("mode-off") + else: + self.update("⏵ auto-approve off (shift+tab to toggle)") + self.add_class("mode-off") + self.remove_class("mode-on") + + def set_auto_approve(self, enabled: bool) -> None: + self._auto_approve = enabled + self._update_display() diff --git a/vibe/cli/textual_ui/widgets/path_display.py b/vibe/cli/textual_ui/widgets/path_display.py new file mode 100644 index 0000000..844cc90 --- /dev/null +++ b/vibe/cli/textual_ui/widgets/path_display.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from pathlib import Path + +from textual.widgets import Static + + +class PathDisplay(Static): + def __init__(self, path: Path | str) -> None: + super().__init__() + self.can_focus = False + self._path = Path(path) + self._update_display() + + def _update_display(self) -> None: + path_str = str(self._path) + try: + home = Path.home() + if self._path.is_relative_to(home): + path_str = f"~/{self._path.relative_to(home)}" + except (ValueError, OSError): + pass + + self.update(path_str) + + def set_path(self, path: Path | str) -> None: + self._path = Path(path) + self._update_display() diff --git a/vibe/cli/textual_ui/widgets/tool_widgets.py b/vibe/cli/textual_ui/widgets/tool_widgets.py new file 
mode 100644 index 0000000..db5c5a2 --- /dev/null +++ b/vibe/cli/textual_ui/widgets/tool_widgets.py @@ -0,0 +1,307 @@ +from __future__ import annotations + +from textual.app import ComposeResult +from textual.containers import Vertical +from textual.widgets import Markdown, Static + + +class ToolApprovalWidget(Vertical): + def __init__(self, data: dict) -> None: + super().__init__() + self.data = data + self.add_class("tool-approval-widget") + + def compose(self) -> ComposeResult: + MAX_APPROVAL_MSG_SIZE = 150 + + for key, value in self.data.items(): + value_str = str(value) + if len(value_str) > MAX_APPROVAL_MSG_SIZE: + hidden = len(value_str) - MAX_APPROVAL_MSG_SIZE + value_str = ( + value_str[:MAX_APPROVAL_MSG_SIZE] + f"… ({hidden} more characters)" + ) + yield Static( + f"{key}: {value_str}", markup=False, classes="approval-description" + ) + + +class ToolResultWidget(Static): + def __init__(self, data: dict, collapsed: bool = True) -> None: + super().__init__() + self.data = data + self.collapsed = collapsed + self.add_class("tool-result-widget") + + def compose(self) -> ComposeResult: + message = self.data.get("message", "") + + if self.collapsed: + yield Static(f"{message} (ctrl+o to expand.)", markup=False) + else: + yield Static(message, markup=False) + + if not self.collapsed and (details := self.data.get("details")): + for key, value in details.items(): + if value: + yield Static( + f"{key}: {value}", markup=False, classes="tool-result-detail" + ) + + +class BashApprovalWidget(ToolApprovalWidget): + def compose(self) -> ComposeResult: + command = self.data.get("command", "") + description = self.data.get("description", "") + + if description: + yield Static(description, markup=False, classes="approval-description") + yield Static("") + + yield Markdown(f"```bash\n{command}\n```") + + +class BashResultWidget(ToolResultWidget): + def compose(self) -> ComposeResult: + message = self.data.get("message", "") + + if self.collapsed: + yield Static(f"{message} 
(ctrl+o to expand.)", markup=False) + else: + yield Static(message, markup=False) + + if not self.collapsed and (details := self.data.get("details")): + for key, value in details.items(): + if value: + yield Static( + f"{key}: {value}", markup=False, classes="tool-result-detail" + ) + + +class WriteFileApprovalWidget(ToolApprovalWidget): + def compose(self) -> ComposeResult: + path = self.data.get("path", "") + content = self.data.get("content", "") + file_extension = self.data.get("file_extension", "text") + + yield Static(f"File: {path}", markup=False, classes="approval-description") + yield Static("") + + yield Markdown(f"```{file_extension}\n{content}\n```") + + +class WriteFileResultWidget(ToolResultWidget): + def compose(self) -> ComposeResult: + MAX_LINES = 10 + message = self.data.get("message", "") + + if self.collapsed: + yield Static(f"{message} (ctrl+o to expand.)", markup=False) + else: + yield Static(message, markup=False) + + if not self.collapsed: + if path := self.data.get("path"): + yield Static( + f"Path: {path}", markup=False, classes="tool-result-detail" + ) + + if bytes_written := self.data.get("bytes_written"): + yield Static( + f"Bytes: {bytes_written}", + markup=False, + classes="tool-result-detail", + ) + + if content := self.data.get("content"): + yield Static("") + file_extension = self.data.get("file_extension", "text") + + lines = content.split("\n") + total_lines = len(lines) + + if total_lines > MAX_LINES: + shown_lines = lines[:MAX_LINES] + remaining = total_lines - MAX_LINES + truncated_content = "\n".join( + shown_lines + [f"… ({remaining} more lines)"] + ) + yield Markdown(f"```{file_extension}\n{truncated_content}\n```") + else: + yield Markdown(f"```{file_extension}\n{content}\n```") + + +class SearchReplaceApprovalWidget(ToolApprovalWidget): + def compose(self) -> ComposeResult: + file_path = self.data.get("file_path", "") + diff_lines = self.data.get("diff_lines", []) + + yield Static(f"File: {file_path}", markup=False, 
classes="approval-description") + yield Static("") + + if diff_lines: + for line in diff_lines: + if line.startswith("---") or line.startswith("+++"): + yield Static(line, markup=False, classes="diff-header") + elif line.startswith("-"): + yield Static(line, markup=False, classes="diff-removed") + elif line.startswith("+"): + yield Static(line, markup=False, classes="diff-added") + elif line.startswith("@@"): + yield Static(line, markup=False, classes="diff-range") + else: + yield Static(line, markup=False, classes="diff-context") + + +class SearchReplaceResultWidget(ToolResultWidget): + def compose(self) -> ComposeResult: + message = self.data.get("message", "") + + if self.collapsed: + yield Static(f"{message} (ctrl+o to expand.)", markup=False) + else: + yield Static(message, markup=False) + + if not self.collapsed and (diff_lines := self.data.get("diff_lines")): + yield Static("") + for line in diff_lines: + if line.startswith("---") or line.startswith("+++"): + yield Static(line, markup=False, classes="diff-header") + elif line.startswith("-"): + yield Static(line, markup=False, classes="diff-removed") + elif line.startswith("+"): + yield Static(line, markup=False, classes="diff-added") + elif line.startswith("@@"): + yield Static(line, markup=False, classes="diff-range") + else: + yield Static(line, markup=False, classes="diff-context") + + +class TodoApprovalWidget(ToolApprovalWidget): + def compose(self) -> ComposeResult: + description = self.data.get("description", "") + if description: + yield Static(description, markup=False, classes="approval-description") + + +class TodoResultWidget(ToolResultWidget): + def compose(self) -> ComposeResult: + message = self.data.get("message", "") + + if self.collapsed: + yield Static(message, markup=False) + else: + yield Static(message, markup=False) + yield Static("") + + by_status = self.data.get("todos_by_status", {}) + if not any(by_status.values()): + yield Static("No todos", markup=False, classes="todo-empty") + 
return + + for status in ["in_progress", "pending", "completed", "cancelled"]: + todos = by_status.get(status, []) + for todo in todos: + content = todo.get("content", "") + icon = self._get_status_icon(status) + yield Static( + f"{icon} {content}", markup=False, classes=f"todo-{status}" + ) + + def _get_status_icon(self, status: str) -> str: + icons = {"pending": "☐", "in_progress": "☐", "completed": "☑", "cancelled": "☒"} + return icons.get(status, "☐") + + +class ReadFileApprovalWidget(ToolApprovalWidget): + def compose(self) -> ComposeResult: + for key, value in self.data.items(): + if value: + yield Static( + f"{key}: {value}", markup=False, classes="approval-description" + ) + + +class ReadFileResultWidget(ToolResultWidget): + def compose(self) -> ComposeResult: + MAX_LINES = 10 + message = self.data.get("message", "") + + if self.collapsed: + yield Static(f"{message} (ctrl+o to expand.)", markup=False) + else: + yield Static(message, markup=False) + + if self.collapsed: + return + + if path := self.data.get("path"): + yield Static(f"Path: {path}", markup=False, classes="tool-result-detail") + + if warnings := self.data.get("warnings"): + for warning in warnings: + yield Static( + f"⚠ {warning}", markup=False, classes="tool-result-warning" + ) + + if content := self.data.get("content"): + yield Static("") + file_extension = self.data.get("file_extension", "text") + + lines = content.split("\n") + total_lines = len(lines) + + if total_lines > MAX_LINES: + shown_lines = lines[:MAX_LINES] + remaining = total_lines - MAX_LINES + truncated_content = "\n".join( + shown_lines + [f"… ({remaining} more lines)"] + ) + yield Markdown(f"```{file_extension}\n{truncated_content}\n```") + else: + yield Markdown(f"```{file_extension}\n{content}\n```") + + +class GrepApprovalWidget(ToolApprovalWidget): + def compose(self) -> ComposeResult: + for key, value in self.data.items(): + if value: + yield Static( + f"{key}: {value!s}", classes="approval-description", markup=False + ) 
+ + +class GrepResultWidget(ToolResultWidget): + def compose(self) -> ComposeResult: + MAX_LINES = 30 + message = self.data.get("message", "") + + if self.collapsed: + yield Static(f"{message} (ctrl+o to expand.)", markup=False) + else: + yield Static(message, markup=False) + + if self.collapsed: + return + + if warnings := self.data.get("warnings"): + for warning in warnings: + yield Static( + f"⚠ {warning}", classes="tool-result-warning", markup=False + ) + + if matches := self.data.get("matches"): + yield Static("") + + lines = matches.split("\n") + total_lines = len(lines) + + if total_lines > MAX_LINES: + shown_lines = lines[:MAX_LINES] + remaining = total_lines - MAX_LINES + truncated_content = "\n".join( + shown_lines + [f"… ({remaining} more lines)"] + ) + yield Markdown(f"```\n{truncated_content}\n```") + else: + yield Markdown(f"```\n{matches}\n```") diff --git a/vibe/cli/textual_ui/widgets/tools.py b/vibe/cli/textual_ui/widgets/tools.py new file mode 100644 index 0000000..0767f97 --- /dev/null +++ b/vibe/cli/textual_ui/widgets/tools.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +from textual.widgets import Static + +from vibe.cli.textual_ui.renderers import get_renderer +from vibe.cli.textual_ui.widgets.blinking_message import BlinkingMessage +from vibe.core.tools.ui import ToolUIDataAdapter +from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class ToolCallMessage(BlinkingMessage): + def __init__(self, event: ToolCallEvent) -> None: + self.event = event + super().__init__() + self.add_class("tool-call") + + def get_content(self) -> str: + if not self.event.tool_class: + return f"{self.event.tool_name}" + + adapter = ToolUIDataAdapter(self.event.tool_class) + display = adapter.get_call_display(self.event) + + return f"{display.summary}" + + +class ToolResultMessage(Static): + def __init__( + self, + event: ToolResultEvent, + call_widget: ToolCallMessage | None = None, + collapsed: bool = True, + ) -> None: + self.event = event + 
self.call_widget = call_widget + self.collapsed = collapsed + + super().__init__() + self.add_class("tool-result") + + async def on_mount(self) -> None: + if self.call_widget: + success = not self.event.error and not self.event.skipped + self.call_widget.stop_blinking(success=success) + await self.render_result() + + async def render_result(self) -> None: + await self.remove_children() + + if self.event.error: + self.add_class("error-text") + if self.collapsed: + self.update("Error. (ctrl+o to expand)") + else: + await self.mount(Static(f"Error: {self.event.error}", markup=False)) + return + + if self.event.skipped: + self.add_class("warning-text") + reason = self.event.skip_reason or "User skipped" + if self.collapsed: + self.update("Skipped. (ctrl+o to expand)") + else: + await self.mount(Static(f"Skipped: {reason}", markup=False)) + return + + self.remove_class("error-text") + self.remove_class("warning-text") + + adapter = ToolUIDataAdapter(self.event.tool_class) + display = adapter.get_result_display(self.event) + + renderer = get_renderer(self.event.tool_name) + widget_class, data = renderer.get_result_widget(display, self.collapsed) + + result_widget = widget_class(data, collapsed=self.collapsed) + await self.mount(result_widget) + + async def set_collapsed(self, collapsed: bool) -> None: + if self.collapsed != collapsed: + self.collapsed = collapsed + await self.render_result() + + async def toggle_collapsed(self) -> None: + self.collapsed = not self.collapsed + await self.render_result() diff --git a/vibe/cli/textual_ui/widgets/welcome.py b/vibe/cli/textual_ui/widgets/welcome.py new file mode 100644 index 0000000..702cdef --- /dev/null +++ b/vibe/cli/textual_ui/widgets/welcome.py @@ -0,0 +1,283 @@ +from __future__ import annotations + +from dataclasses import dataclass +from time import monotonic + +from rich.align import Align +from rich.console import Group +from rich.text import Text +from textual.color import Color +from textual.widgets import Static + 
from vibe.core import __version__
from vibe.core.config import VibeConfig


def hex_to_rgb(hex_color: str) -> tuple[int, int, int]:
    """Parse ``#RRGGBB`` (or CSS shorthand ``#RGB``) into an (r, g, b) tuple.

    The leading ``#`` is optional.  Three-digit shorthand is expanded per the
    CSS rule (``#fa0`` -> ``#ffaa00``) — a backward-compatible generalization;
    six-digit input behaves exactly as before.  Raises ValueError on
    non-hexadecimal digits.
    """
    normalized = hex_color.lstrip("#")
    if len(normalized) == 3:
        normalized = "".join(ch * 2 for ch in normalized)
    r, g, b = (int(normalized[i : i + 2], 16) for i in (0, 2, 4))
    return (r, g, b)


def rgb_to_hex(r: int, g: int, b: int) -> str:
    """Format an (r, g, b) triple as a lowercase ``#rrggbb`` string."""
    return f"#{r:02x}{g:02x}{b:02x}"


def interpolate_color(
    start_rgb: tuple[int, int, int], end_rgb: tuple[int, int, int], progress: float
) -> str:
    """Linearly blend two RGB colors; ``progress`` is clamped to [0.0, 1.0].

    Returns the blended color as a ``#rrggbb`` string (components truncated
    toward zero, matching the original int() behavior).
    """
    progress = max(0.0, min(1.0, progress))
    r = int(start_rgb[0] + (end_rgb[0] - start_rgb[0]) * progress)
    g = int(start_rgb[1] + (end_rgb[1] - start_rgb[1]) * progress)
    b = int(start_rgb[2] + (end_rgb[2] - start_rgb[2]) * progress)
    return rgb_to_hex(r, g, b)


@dataclass
class LineAnimationState:
    # Per-logo-line animation progress plus memoized color computations.
    progress: float = 0.0
    cached_color: str | None = None
    cached_progress: float = -1.0
    rendered_color: str | None = None


class WelcomeBanner(Static):
    FLASH_COLOR = "#FFFFFF"
    TARGET_COLORS = ("#FFD800", "#FFAF00", "#FF8205", "#FA500F", "#E10500")
    BORDER_TARGET_COLOR = "#b05800"

    LINE_ANIMATION_DURATION_MS = 200
    LINE_STAGGER_MS = 280
    FLASH_RESET_DURATION_MS = 400
    ANIMATION_TICK_INTERVAL = 0.1

    COLOR_FLASH_MIDPOINT = 0.5
    COLOR_PHASE_SCALE = 2.0
    COLOR_CACHE_THRESHOLD = 0.001
    BORDER_PROGRESS_THRESHOLD = 0.01

    BLOCK = "▇▇"
    SPACE = " "
    LOGO_TEXT_GAP = " "

    def __init__(self, config: VibeConfig) -> None:
        super().__init__(" ")
        self.config = config
        self.animation_timer = None
        self._animation_start_time: float | None = None

        self._cached_skeleton_color: str | None = None
        self._cached_skeleton_rgb: tuple[int, int, int] | None = None
        self._flash_rgb = hex_to_rgb(self.FLASH_COLOR)
        self._target_rgbs = [hex_to_rgb(c) for c in self.TARGET_COLORS]
        self._border_target_rgb = hex_to_rgb(self.BORDER_TARGET_COLOR)

        self._line_states = [LineAnimationState() for _ in self.TARGET_COLORS]
        self.border_progress = 0.0
        self._cached_border_color: str | None = None
self._cached_border_progress = -1.0 + + self._line_duration = self.LINE_ANIMATION_DURATION_MS / 1000 + self._line_stagger = self.LINE_STAGGER_MS / 1000 + self._border_duration = self.FLASH_RESET_DURATION_MS / 1000 + self._line_start_times = [ + idx * self._line_stagger for idx in range(len(self.TARGET_COLORS)) + ] + self._all_lines_finish_time = ( + (len(self.TARGET_COLORS) - 1) * self.LINE_STAGGER_MS + + self.LINE_ANIMATION_DURATION_MS + ) / 1000 + + self._cached_text_lines: list[Text | None] = [None] * 7 + self._initialize_static_line_suffixes() + + def _initialize_static_line_suffixes(self) -> None: + self._static_line1_suffix = ( + f"{self.LOGO_TEXT_GAP}[b]Mistral Vibe v{__version__}[/]" + ) + self._static_line2_suffix = ( + f"{self.LOGO_TEXT_GAP}[dim]{self.config.active_model}[/]" + ) + mcp_count = len(self.config.mcp_servers) + model_count = len(self.config.models) + self._static_line3_suffix = f"{self.LOGO_TEXT_GAP}[dim]{model_count} models · {mcp_count} MCP servers[/]" + self._static_line5_suffix = ( + f"{self.LOGO_TEXT_GAP}[dim]{self.config.effective_workdir}[/]" + ) + block = (self.SPACE * 4) + self.LOGO_TEXT_GAP + self._static_line7 = f"{block}[dim]Type[/] [{self.BORDER_TARGET_COLOR}]/help[/] [dim]for more information[/]" + + @property + def skeleton_color(self) -> str: + return self._cached_skeleton_color or "#1e1e1e" + + @property + def skeleton_rgb(self) -> tuple[int, int, int]: + return self._cached_skeleton_rgb or hex_to_rgb("#1e1e1e") + + def on_mount(self) -> None: + if not self.config.disable_welcome_banner_animation: + self.call_after_refresh(self._init_after_styles) + + def _init_after_styles(self) -> None: + self._cache_skeleton_color() + self._cached_text_lines[5] = Text("") + self._cached_text_lines[6] = Text.from_markup(self._static_line7) + self._update_display() + self._start_animation() + + def _cache_skeleton_color(self) -> None: + try: + border = self.styles.border + if ( + hasattr(border, "top") + and isinstance(edge := border.top, 
tuple) + and len(edge) >= 2 # noqa: PLR2004 + and isinstance(color := edge[1], Color) + ): + self._cached_skeleton_color = color.hex + self._cached_skeleton_rgb = hex_to_rgb(color.hex) + return + except (AttributeError, TypeError): + pass + + self._cached_skeleton_color = "#1e1e1e" + self._cached_skeleton_rgb = hex_to_rgb("#1e1e1e") + + def _stop_timer(self) -> None: + if self.animation_timer: + try: + self.animation_timer.stop() + except Exception: + pass + self.animation_timer = None + + def on_unmount(self) -> None: + self._stop_timer() + + def _start_animation(self) -> None: + self._animation_start_time = monotonic() + + def tick() -> None: + if self._is_animation_complete(): + self._stop_timer() + return + if self._animation_start_time is None: + return + + elapsed = monotonic() - self._animation_start_time + updated_lines = self._advance_line_progress(elapsed) + border_updated = self._advance_border_progress(elapsed) + + if border_updated: + self._update_border_color() + if updated_lines or border_updated: + self._update_display() + + self.animation_timer = self.set_interval(self.ANIMATION_TICK_INTERVAL, tick) + + def _advance_line_progress(self, elapsed: float) -> bool: + any_updates = False + for line_idx, state in enumerate(self._line_states): + if state.progress >= 1.0: + continue + start_time = self._line_start_times[line_idx] + if elapsed < start_time: + continue + progress = min(1.0, (elapsed - start_time) / self._line_duration) + if progress > state.progress: + state.progress = progress + any_updates = True + return any_updates + + def _advance_border_progress(self, elapsed: float) -> bool: + if elapsed < self._all_lines_finish_time: + return False + + new_progress = min( + 1.0, (elapsed - self._all_lines_finish_time) / self._border_duration + ) + + if abs(new_progress - self.border_progress) > self.BORDER_PROGRESS_THRESHOLD: + self.border_progress = new_progress + return True + + return False + + def _is_animation_complete(self) -> bool: + return ( + 
all(state.progress >= 1.0 for state in self._line_states) + and self.border_progress >= 1.0 + ) + + def _update_border_color(self) -> None: + progress = self.border_progress + if abs(progress - self._cached_border_progress) < self.COLOR_CACHE_THRESHOLD: + return + + border_color = self._compute_color_for_progress( + progress, self._border_target_rgb + ) + self._cached_border_color = border_color + self._cached_border_progress = progress + self.styles.border = ("round", border_color) + + def _compute_color_for_progress( + self, progress: float, target_rgb: tuple[int, int, int] + ) -> str: + if progress <= 0: + return self.skeleton_color + + if progress <= self.COLOR_FLASH_MIDPOINT: + phase = progress * self.COLOR_PHASE_SCALE + return interpolate_color(self.skeleton_rgb, self._flash_rgb, phase) + + phase = (progress - self.COLOR_FLASH_MIDPOINT) * self.COLOR_PHASE_SCALE + return interpolate_color(self._flash_rgb, target_rgb, phase) + + def _update_display(self) -> None: + for idx in range(5): + self._update_colored_line(idx, idx) + + lines = [line if line else Text("") for line in self._cached_text_lines] + self.update(Align.center(Group(*lines))) + + def _get_color(self, line_idx: int) -> str: + state = self._line_states[line_idx] + if ( + abs(state.progress - state.cached_progress) < self.COLOR_CACHE_THRESHOLD + and state.cached_color + ): + return state.cached_color + + color = self._compute_color_for_progress( + state.progress, self._target_rgbs[line_idx] + ) + state.cached_color = color + state.cached_progress = state.progress + return color + + def _update_colored_line(self, slot_idx: int, line_idx: int) -> None: + color = self._get_color(line_idx) + state = self._line_states[line_idx] + + if color == state.rendered_color and self._cached_text_lines[slot_idx]: + return + + state.rendered_color = color + self._cached_text_lines[slot_idx] = Text.from_markup( + self._build_line(slot_idx, color) + ) + + def _build_line(self, line_idx: int, color: str) -> str: + B = 
self.BLOCK + S = self.SPACE + + patterns = [ + f"{S}[{color}]{B}[/]{S}{S}{S}[{color}]{B}[/]{S}{self._static_line1_suffix}", + f"{S}[{color}]{B}{B}[/]{S}[{color}]{B}{B}[/]{S}{self._static_line2_suffix}", + f"{S}[{color}]{B}{B}{B}{B}{B}[/]{S}{self._static_line3_suffix}", + f"{S}[{color}]{B}[/]{S}[{color}]{B}[/]{S}[{color}]{B}[/]{S}", + f"[{color}]{B}{B}{B}[/]{S}[{color}]{B}{B}{B}[/]{self._static_line5_suffix}", + ] + return patterns[line_idx] diff --git a/vibe/cli/update_notifier/__init__.py b/vibe/cli/update_notifier/__init__.py new file mode 100644 index 0000000..7727e0b --- /dev/null +++ b/vibe/cli/update_notifier/__init__.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from vibe.cli.update_notifier.fake_version_update_gateway import ( + FakeVersionUpdateGateway, +) +from vibe.cli.update_notifier.github_version_update_gateway import ( + GitHubVersionUpdateGateway, +) +from vibe.cli.update_notifier.version_update import ( + VersionUpdateError, + is_version_update_available, +) +from vibe.cli.update_notifier.version_update_gateway import ( + DEFAULT_GATEWAY_MESSAGES, + VersionUpdate, + VersionUpdateGateway, + VersionUpdateGatewayCause, + VersionUpdateGatewayError, +) + +__all__ = [ + "DEFAULT_GATEWAY_MESSAGES", + "FakeVersionUpdateGateway", + "GitHubVersionUpdateGateway", + "VersionUpdate", + "VersionUpdateError", + "VersionUpdateGateway", + "VersionUpdateGatewayCause", + "VersionUpdateGatewayError", + "is_version_update_available", +] diff --git a/vibe/cli/update_notifier/fake_version_update_gateway.py b/vibe/cli/update_notifier/fake_version_update_gateway.py new file mode 100644 index 0000000..8baac50 --- /dev/null +++ b/vibe/cli/update_notifier/fake_version_update_gateway.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from vibe.cli.update_notifier.version_update_gateway import ( + VersionUpdate, + VersionUpdateGateway, + VersionUpdateGatewayError, +) + + +class FakeVersionUpdateGateway(VersionUpdateGateway): + def __init__( + self, + 
update: VersionUpdate | None = None, + error: VersionUpdateGatewayError | None = None, + ) -> None: + self._update: VersionUpdate | None = update + self._error = error + self.fetch_update_calls = 0 + + async def fetch_update(self) -> VersionUpdate | None: + self.fetch_update_calls += 1 + if self._error is not None: + raise self._error + return self._update diff --git a/vibe/cli/update_notifier/github_version_update_gateway.py b/vibe/cli/update_notifier/github_version_update_gateway.py new file mode 100644 index 0000000..86bbbc4 --- /dev/null +++ b/vibe/cli/update_notifier/github_version_update_gateway.py @@ -0,0 +1,109 @@ +from __future__ import annotations + +import httpx + +from vibe.cli.update_notifier.version_update_gateway import ( + VersionUpdate, + VersionUpdateGateway, + VersionUpdateGatewayCause, + VersionUpdateGatewayError, +) + + +class GitHubVersionUpdateGateway(VersionUpdateGateway): + def __init__( + self, + owner: str, + repository: str, + *, + token: str | None = None, + client: httpx.AsyncClient | None = None, + timeout: float = 5.0, + base_url: str = "https://api.github.com", + ) -> None: + self._owner = owner + self._repository = repository + self._token = token + self._client = client + self._timeout = timeout + self._base_url = base_url.rstrip("/") + + async def fetch_update(self) -> VersionUpdate | None: + headers = { + "Accept": "application/vnd.github+json", + "User-Agent": "mistral-vibe-update-notifier", + } + if self._token: + headers["Authorization"] = f"Bearer {self._token}" + + request_path = f"/repos/{self._owner}/{self._repository}/releases" + + try: + if self._client is not None: + response = await self._client.get( + f"{self._base_url}{request_path}", + headers=headers, + timeout=self._timeout, + ) + else: + async with httpx.AsyncClient( + base_url=self._base_url, timeout=self._timeout + ) as client: + response = await client.get(request_path, headers=headers) + except httpx.RequestError as exc: + raise VersionUpdateGatewayError( + 
cause=VersionUpdateGatewayCause.REQUEST_FAILED + ) from exc + + rate_limit_remaining = response.headers.get("X-RateLimit-Remaining") + if response.status_code == httpx.codes.TOO_MANY_REQUESTS or ( + rate_limit_remaining is not None and rate_limit_remaining == "0" + ): + raise VersionUpdateGatewayError( + cause=VersionUpdateGatewayCause.TOO_MANY_REQUESTS + ) + + if response.status_code == httpx.codes.FORBIDDEN: + raise VersionUpdateGatewayError(cause=VersionUpdateGatewayCause.FORBIDDEN) + + if response.status_code == httpx.codes.NOT_FOUND: + raise VersionUpdateGatewayError( + cause=VersionUpdateGatewayCause.NOT_FOUND, + message="Unable to fetch the GitHub releases. Did you export a GITHUB_TOKEN environment variable?", + ) + + if response.is_error: + raise VersionUpdateGatewayError( + cause=VersionUpdateGatewayCause.ERROR_RESPONSE + ) + + try: + data = response.json() + except ValueError as exc: + raise VersionUpdateGatewayError( + cause=VersionUpdateGatewayCause.INVALID_RESPONSE + ) from exc + + if not data: + return None + + # pick the most recently published non-prerelease and non-draft release + # github "list releases" API most likely returns ordered results, but this is not guaranteed + for release in sorted( + data, key=lambda x: x.get("published_at") or "", reverse=True + ): + if release.get("prerelease") or release.get("draft"): + continue + if version := _extract_version(release.get("tag_name")): + return VersionUpdate(latest_version=version) + + return None + + +def _extract_version(tag_name: str | None) -> str | None: + if not tag_name: + return None + tag = tag_name.strip() + if not tag: + return None + return tag[1:] if tag.startswith(("v", "V")) else tag diff --git a/vibe/cli/update_notifier/version_update.py b/vibe/cli/update_notifier/version_update.py new file mode 100644 index 0000000..55445e0 --- /dev/null +++ b/vibe/cli/update_notifier/version_update.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from packaging.version import 
class VersionUpdateError(Exception):
    """User-facing failure raised while checking for an available update."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
        # Keep the text on an attribute so callers can render it directly
        # without going through str(exception).
        self.message = message
latest_version: str + + +class VersionUpdateGatewayCause(StrEnum): + @staticmethod + def _generate_next_value_( + name: str, start: int, count: int, last_values: list[str] + ) -> str: + return name.lower() + + TOO_MANY_REQUESTS = auto() + FORBIDDEN = auto() + NOT_FOUND = auto() + REQUEST_FAILED = auto() + ERROR_RESPONSE = auto() + INVALID_RESPONSE = auto() + UNKNOWN = auto() + + +DEFAULT_GATEWAY_MESSAGES: dict[VersionUpdateGatewayCause, str] = { + VersionUpdateGatewayCause.TOO_MANY_REQUESTS: "Rate limit exceeded while checking for updates.", + VersionUpdateGatewayCause.FORBIDDEN: "Request was forbidden while checking for updates.", + VersionUpdateGatewayCause.NOT_FOUND: "Unable to fetch the releases. Please check your permissions.", + VersionUpdateGatewayCause.REQUEST_FAILED: "Network error while checking for updates.", + VersionUpdateGatewayCause.ERROR_RESPONSE: "Unexpected response received while checking for updates.", + VersionUpdateGatewayCause.INVALID_RESPONSE: "Received an invalid response while checking for updates.", + VersionUpdateGatewayCause.UNKNOWN: "Unable to determine whether an update is available.", +} + + +class VersionUpdateGatewayError(Exception): + def __init__( + self, *, cause: VersionUpdateGatewayCause, message: str | None = None + ) -> None: + self.cause = cause + self.user_message = message + detail = message or DEFAULT_GATEWAY_MESSAGES.get( + cause, DEFAULT_GATEWAY_MESSAGES[VersionUpdateGatewayCause.UNKNOWN] + ) + super().__init__(detail) + + +@runtime_checkable +class VersionUpdateGateway(Protocol): + async def fetch_update(self) -> VersionUpdate | None: ... 
diff --git a/vibe/core/__init__.py b/vibe/core/__init__.py new file mode 100644 index 0000000..3c783b4 --- /dev/null +++ b/vibe/core/__init__.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +__all__ = ["__version__", "run_programmatic"] +__version__ = "0.1.0" + +from vibe.core.programmatic import run_programmatic diff --git a/vibe/core/agent.py b/vibe/core/agent.py new file mode 100644 index 0000000..da31cfd --- /dev/null +++ b/vibe/core/agent.py @@ -0,0 +1,984 @@ +from __future__ import annotations + +import asyncio +from collections import OrderedDict +from collections.abc import AsyncGenerator, Callable +from enum import StrEnum, auto +import time +from typing import Any, cast +from uuid import uuid4 + +from pydantic import BaseModel + +from vibe.core.config import VibeConfig +from vibe.core.interaction_logger import InteractionLogger +from vibe.core.llm.backend.factory import BACKEND_FACTORY +from vibe.core.llm.format import APIToolFormatHandler, ResolvedMessage +from vibe.core.llm.types import BackendLike +from vibe.core.middleware import ( + AutoCompactMiddleware, + ContextWarningMiddleware, + ConversationContext, + MiddlewareAction, + MiddlewarePipeline, + MiddlewareResult, + PriceLimitMiddleware, + ResetReason, + TurnLimitMiddleware, +) +from vibe.core.prompts import UtilityPrompt +from vibe.core.system_prompt import get_universal_system_prompt +from vibe.core.tools.base import ( + BaseTool, + ToolError, + ToolPermission, + ToolPermissionError, +) +from vibe.core.tools.manager import ToolManager +from vibe.core.types import ( + AgentStats, + ApprovalCallback, + AssistantEvent, + BaseEvent, + CompactEndEvent, + CompactStartEvent, + LLMChunk, + LLMMessage, + Role, + SyncApprovalCallback, + ToolCall, + ToolCallEvent, + ToolResultEvent, +) +from vibe.core.utils import ( + TOOL_ERROR_TAG, + VIBE_STOP_EVENT_TAG, + ApprovalResponse, + CancellationReason, + get_user_agent, + get_user_cancellation_message, + is_user_cancellation_event, +) + + +class 
ToolExecutionResponse(StrEnum): + SKIP = auto() + EXECUTE = auto() + + +class ToolDecision(BaseModel): + verdict: ToolExecutionResponse + feedback: str | None = None + + +class AgentError(Exception): + """Base exception for Agent errors.""" + + +class AgentStateError(AgentError): + """Raised when agent is in an invalid state.""" + + +class LLMResponseError(AgentError): + """Raised when LLM response is malformed or missing expected data.""" + + +class Agent: + def __init__( + self, + config: VibeConfig, + auto_approve: bool = False, + message_observer: Callable[[LLMMessage], None] | None = None, + max_turns: int | None = None, + max_price: float | None = None, + backend: BackendLike | None = None, + enable_streaming: bool = False, + ) -> None: + self.config = config + + self.tool_manager = ToolManager(config) + self.format_handler = APIToolFormatHandler() + + self.backend_factory = lambda: backend or self._select_backend() + self.backend = self.backend_factory() + + self.message_observer = message_observer + self._last_observed_message_index: int = 0 + self.middleware_pipeline = MiddlewarePipeline() + self.enable_streaming = enable_streaming + self._setup_middleware(max_turns, max_price) + + system_prompt = get_universal_system_prompt(self.tool_manager, config) + + self.messages = [LLMMessage(role=Role.system, content=system_prompt)] + + if self.message_observer: + self.message_observer(self.messages[0]) + self._last_observed_message_index = 1 + + self.stats = AgentStats() + try: + active_model = config.get_active_model() + self.stats.input_price_per_million = active_model.input_price + self.stats.output_price_per_million = active_model.output_price + except ValueError: + pass + + self.auto_approve = auto_approve + self.approval_callback: ApprovalCallback | None = None + + self.session_id = str(uuid4()) + + self.interaction_logger = InteractionLogger( + config.session_logging, + self.session_id, + auto_approve, + config.effective_workdir, + ) + + self._last_chunk: 
LLMChunk | None = None + + def _select_backend(self) -> BackendLike: + active_model = self.config.get_active_model() + provider = self.config.get_provider_for_model(active_model) + timeout = self.config.api_timeout + return BACKEND_FACTORY[provider.backend](provider=provider, timeout=timeout) + + def add_message(self, message: LLMMessage) -> None: + self.messages.append(message) + + def _flush_new_messages(self) -> None: + if not self.message_observer: + return + + if self._last_observed_message_index >= len(self.messages): + return + + for msg in self.messages[self._last_observed_message_index :]: + self.message_observer(msg) + self._last_observed_message_index = len(self.messages) + + async def act(self, msg: str) -> AsyncGenerator[BaseEvent]: + self._clean_message_history() + async for event in self._conversation_loop(msg): + yield event + + def _setup_middleware(self, max_turns: int | None, max_price: float | None) -> None: + self.middleware_pipeline.clear() + + if max_turns is not None: + self.middleware_pipeline.add(TurnLimitMiddleware(max_turns)) + + if max_price is not None: + self.middleware_pipeline.add(PriceLimitMiddleware(max_price)) + + if self.config.auto_compact_threshold > 0: + self.middleware_pipeline.add( + AutoCompactMiddleware(self.config.auto_compact_threshold) + ) + if self.config.context_warnings: + self.middleware_pipeline.add( + ContextWarningMiddleware(0.5, self.config.auto_compact_threshold) + ) + + async def _handle_middleware_result( + self, result: MiddlewareResult + ) -> AsyncGenerator[BaseEvent]: + match result.action: + case MiddlewareAction.STOP: + yield AssistantEvent( + content=f"<{VIBE_STOP_EVENT_TAG}>{result.reason}", + prompt_tokens=0, + completion_tokens=0, + session_total_tokens=self.stats.session_total_llm_tokens, + last_turn_duration=0, + tokens_per_second=0, + stopped_by_middleware=True, + ) + await self.interaction_logger.save_interaction( + self.messages, self.stats, self.config, self.tool_manager + ) + + case 
    def _get_context(self) -> ConversationContext:
        """Bundle the live conversation state for middleware hooks.

        Note: passes the live message list and stats objects (not copies),
        so middleware can observe — and inject into — the real history.
        """
        return ConversationContext(
            messages=self.messages, stats=self.stats, config=self.config
        )
    async def _perform_llm_turn(
        self,
    ) -> AsyncGenerator[AssistantEvent | ToolCallEvent | ToolResultEvent]:
        """Run one LLM request/response cycle plus any tool calls it triggers.

        Yields assistant content events (streamed in batches or as one
        event, depending on ``enable_streaming``), then tool-call and
        tool-result events for every tool invocation the model requested.

        Raises:
            LLMResponseError: if no chunk/usage data was recorded for the turn.
        """
        if self.enable_streaming:
            async for event in self._stream_assistant_events():
                yield event
        else:
            assistant_event = await self._get_assistant_event()
            if assistant_event.content:
                yield assistant_event

        # Both branches above append the assistant message to self.messages
        # and record the final chunk in self._last_chunk.
        last_message = self.messages[-1]
        last_chunk = self._last_chunk
        if last_chunk is None or last_chunk.usage is None:
            raise LLMResponseError("LLM response missing chunk or usage data")

        # Extract tool calls from the assistant reply and resolve them
        # against the registered tools.
        parsed = self.format_handler.parse_message(last_message)
        resolved = self.format_handler.resolve_tool_calls(
            parsed, self.tool_manager, self.config
        )

        # Throughput stat; both guards avoid division by zero / meaningless
        # rates on empty or instantaneous turns.
        if last_chunk.usage.completion_tokens > 0 and self.stats.last_turn_duration > 0:
            self.stats.tokens_per_second = (
                last_chunk.usage.completion_tokens / self.stats.last_turn_duration
            )

        if not resolved.tool_calls and not resolved.failed_calls:
            return

        async for event in self._handle_tool_calls(resolved):
            yield event
session_total_tokens=self.stats.session_total_llm_tokens, + last_turn_duration=self.stats.last_turn_duration, + tokens_per_second=self.stats.tokens_per_second, + ) + + async def _stream_assistant_events(self) -> AsyncGenerator[AssistantEvent]: + chunks: list[LLMChunk] = [] + content_buffer = "" + chunks_with_content = 0 + BATCH_SIZE = 5 + + async for chunk in self._chat_streaming(): + chunks.append(chunk) + + if chunk.message.tool_calls and chunk.finish_reason is None: + if chunk.message.content: + content_buffer += chunk.message.content + chunks_with_content += 1 + + if content_buffer: + yield self._create_assistant_event(content_buffer, chunk) + content_buffer = "" + chunks_with_content = 0 + continue + + if chunk.message.content: + content_buffer += chunk.message.content + chunks_with_content += 1 + + if chunks_with_content >= BATCH_SIZE: + yield self._create_assistant_event(content_buffer, chunk) + content_buffer = "" + chunks_with_content = 0 + + if content_buffer: + last_chunk = chunks[-1] if chunks else None + yield self._create_assistant_event(content_buffer, last_chunk) + + full_content = "" + full_tool_calls_map = OrderedDict[int, ToolCall]() + for chunk in chunks: + full_content += chunk.message.content or "" + if not chunk.message.tool_calls: + continue + + for tc in chunk.message.tool_calls: + if tc.index is None: + raise LLMResponseError("Tool call chunk missing index") + if tc.index not in full_tool_calls_map: + full_tool_calls_map[tc.index] = tc + else: + new_args_str = ( + full_tool_calls_map[tc.index].function.arguments or "" + ) + (tc.function.arguments or "") + full_tool_calls_map[tc.index].function.arguments = new_args_str + + full_tool_calls = list(full_tool_calls_map.values()) or None + last_message = LLMMessage( + role=Role.assistant, content=full_content, tool_calls=full_tool_calls + ) + self.messages.append(last_message) + finish_reason = next( + (c.finish_reason for c in chunks if c.finish_reason is not None), None + ) + self._last_chunk 
    async def _get_assistant_event(self) -> AssistantEvent:
        """Perform one non-streaming completion and record the reply.

        Appends the assistant message to history, caches the result chunk in
        ``self._last_chunk`` for the subsequent tool-call phase, and returns
        a single event carrying the content plus usage statistics.

        Raises:
            LLMResponseError: if the backend response lacks usage data.
        """
        llm_result = await self._chat()
        if llm_result.usage is None:
            raise LLMResponseError(
                "Usage data missing in non-streaming completion response"
            )
        self._last_chunk = llm_result
        assistant_msg = llm_result.message
        self.messages.append(assistant_msg)

        return AssistantEvent(
            content=assistant_msg.content or "",
            prompt_tokens=llm_result.usage.prompt_tokens,
            completion_tokens=llm_result.usage.completion_tokens,
            session_total_tokens=self.stats.session_total_llm_tokens,
            last_turn_duration=self.stats.last_turn_duration,
            tokens_per_second=self.stats.tokens_per_second,
        )
self._should_execute_tool( + tool_instance, tool_call.args_dict, tool_call_id + ) + + if decision.verdict == ToolExecutionResponse.SKIP: + self.stats.tool_calls_rejected += 1 + skip_reason = decision.feedback or str( + get_user_cancellation_message( + CancellationReason.TOOL_SKIPPED, tool_call.tool_name + ) + ) + + yield ToolResultEvent( + tool_name=tool_call.tool_name, + tool_class=tool_call.tool_class, + skipped=True, + skip_reason=skip_reason, + tool_call_id=tool_call_id, + ) + + self.messages.append( + LLMMessage.model_validate( + self.format_handler.create_tool_response_message( + tool_call, skip_reason + ) + ) + ) + continue + + self.stats.tool_calls_agreed += 1 + + try: + start_time = time.perf_counter() + result_model = await tool_instance.invoke(**tool_call.args_dict) + duration = time.perf_counter() - start_time + + text = "\n".join( + f"{k}: {v}" for k, v in result_model.model_dump().items() + ) + + self.messages.append( + LLMMessage.model_validate( + self.format_handler.create_tool_response_message( + tool_call, text + ) + ) + ) + + yield ToolResultEvent( + tool_name=tool_call.tool_name, + tool_class=tool_call.tool_class, + result=result_model, + duration=duration, + tool_call_id=tool_call_id, + ) + + self.stats.tool_calls_succeeded += 1 + + except asyncio.CancelledError: + cancel = str( + get_user_cancellation_message(CancellationReason.TOOL_INTERRUPTED) + ) + yield ToolResultEvent( + tool_name=tool_call.tool_name, + tool_class=tool_call.tool_class, + error=cancel, + tool_call_id=tool_call_id, + ) + self.messages.append( + LLMMessage.model_validate( + self.format_handler.create_tool_response_message( + tool_call, cancel + ) + ) + ) + await self.interaction_logger.save_interaction( + self.messages, self.stats, self.config, self.tool_manager + ) + raise + + except KeyboardInterrupt: + cancel = str( + get_user_cancellation_message(CancellationReason.TOOL_INTERRUPTED) + ) + yield ToolResultEvent( + tool_name=tool_call.tool_name, + 
    async def _chat(self, max_tokens: int | None = None) -> LLMChunk:
        """Issue one non-streaming completion request and update token stats.

        Args:
            max_tokens: optional completion-token cap forwarded to the backend.

        Returns:
            The completed chunk with the message post-processed by the
            format handler.

        Raises:
            LLMResponseError: if the backend omits usage data.
            RuntimeError: wrapping any backend/API failure with provider and
                model context.
        """
        active_model = self.config.get_active_model()
        provider = self.config.get_provider_for_model(active_model)

        available_tools = self.format_handler.get_available_tools(
            self.tool_manager, self.config
        )
        tool_choice = self.format_handler.get_tool_choice()

        try:
            start_time = time.perf_counter()

            async with self.backend as backend:
                result = await backend.complete(
                    model=active_model,
                    messages=self.messages,
                    temperature=active_model.temperature,
                    tools=available_tools,
                    tool_choice=tool_choice,
                    extra_headers={
                        "User-Agent": get_user_agent(),
                        # x-affinity presumably routes the session to the same
                        # server — TODO confirm against backend docs.
                        "x-affinity": self.session_id,
                    },
                    max_tokens=max_tokens,
                )

            end_time = time.perf_counter()
            if result.usage is None:
                raise LLMResponseError(
                    "Usage data missing in non-streaming completion response"
                )

            # Per-turn stats are overwritten; session stats accumulate.
            self.stats.last_turn_duration = end_time - start_time
            self.stats.last_turn_prompt_tokens = result.usage.prompt_tokens
            self.stats.last_turn_completion_tokens = result.usage.completion_tokens
            self.stats.session_prompt_tokens += result.usage.prompt_tokens
            self.stats.session_completion_tokens += result.usage.completion_tokens
            self.stats.context_tokens = (
                result.usage.prompt_tokens + result.usage.completion_tokens
            )

            processed_message = self.format_handler.process_api_response_message(
                result.message
            )

            return LLMChunk(
                message=processed_message,
                usage=result.usage,
                finish_reason=result.finish_reason,
            )

        except Exception as e:
            # NOTE: this also re-wraps the LLMResponseError raised above.
            raise RuntimeError(
                f"API error from {provider.name} (model: {active_model.name}): {e}"
            ) from e
    async def _should_execute_tool(
        self, tool: BaseTool, args: dict[str, Any], tool_call_id: str
    ) -> ToolDecision:
        """Decide whether a requested tool call may run.

        Precedence order: global auto-approve, per-tool allowlist/denylist
        pattern match, the tool's configured permission, and finally an
        interactive approval prompt.
        """
        if self.auto_approve:
            return ToolDecision(verdict=ToolExecutionResponse.EXECUTE)

        # Validate args first so allowlist/denylist checks see typed values.
        args_model, _ = tool._get_tool_args_results()
        validated_args = args_model.model_validate(args)

        # Pattern-based rules take precedence over the static permission.
        allowlist_denylist_result = tool.check_allowlist_denylist(validated_args)
        if allowlist_denylist_result == ToolPermission.ALWAYS:
            return ToolDecision(verdict=ToolExecutionResponse.EXECUTE)
        elif allowlist_denylist_result == ToolPermission.NEVER:
            denylist_patterns = tool.config.denylist
            denylist_str = ", ".join(repr(pattern) for pattern in denylist_patterns)
            return ToolDecision(
                verdict=ToolExecutionResponse.SKIP,
                feedback=f"Tool '{tool.get_name()}' blocked by denylist: [{denylist_str}]",
            )

        tool_name = tool.get_name()
        perm = self.tool_manager.get_tool_config(tool_name).permission

        if perm is ToolPermission.ALWAYS:
            return ToolDecision(verdict=ToolExecutionResponse.EXECUTE)
        if perm is ToolPermission.NEVER:
            return ToolDecision(
                verdict=ToolExecutionResponse.SKIP,
                feedback=f"Tool '{tool_name}' is permanently disabled",
            )

        # No rule decided; fall back to asking the user (or callback).
        return await self._ask_approval(tool_name, args, tool_call_id)
    def _fill_missing_tool_responses(self) -> None:
        """Insert placeholder tool messages for unanswered tool calls.

        Scans history for assistant messages carrying tool_calls and counts
        the tool-role messages that follow each one. Any shortfall (e.g.
        after an interruption) is filled in place with a cancellation-text
        response per missing call — presumably required so chat APIs that
        expect one tool message per tool_call accept the history; confirm
        against the backend's message validation.
        """
        i = 1  # skip the system message at index 0
        while i < len(self.messages):  # noqa: PLR1702
            msg = self.messages[i]

            if msg.role == "assistant" and msg.tool_calls:
                expected_responses = len(msg.tool_calls)

                if expected_responses > 0:
                    # Count the contiguous run of tool responses that follow.
                    actual_responses = 0
                    j = i + 1
                    while j < len(self.messages) and self.messages[j].role == "tool":
                        actual_responses += 1
                        j += 1

                    if actual_responses < expected_responses:
                        # Insert placeholders right after the existing
                        # responses, preserving the tool_call order.
                        insertion_point = i + 1 + actual_responses

                        for call_idx in range(actual_responses, expected_responses):
                            tool_call_data = msg.tool_calls[call_idx]

                            empty_response = LLMMessage(
                                role=Role.tool,
                                tool_call_id=tool_call_data.id or "",
                                name=(tool_call_data.function.name or "")
                                if tool_call_data.function
                                else "",
                                content=str(
                                    get_user_cancellation_message(
                                        CancellationReason.TOOL_NO_RESPONSE
                                    )
                                ),
                            )

                            self.messages.insert(insertion_point, empty_response)
                            insertion_point += 1

                    # Jump past this assistant message and all of its
                    # (now complete) tool responses.
                    i = i + 1 + expected_responses
                    continue

            i += 1
    async def clear_history(self) -> None:
        """Wipe the conversation and start a fresh session.

        Persists the current interaction first, then keeps only the system
        prompt (index 0), resets stats/middleware/tools, and rotates the
        session id.
        """
        await self.interaction_logger.save_interaction(
            self.messages, self.stats, self.config, self.tool_manager
        )
        # Keep only the system prompt.
        self.messages = self.messages[:1]

        self.stats = AgentStats()

        try:
            active_model = self.config.get_active_model()
            self.stats.update_pricing(
                active_model.input_price, active_model.output_price
            )
        except ValueError:
            # No active model configured; pricing stays at defaults.
            pass

        self.middleware_pipeline.reset()
        self.tool_manager.reset_all()
        self._reset_session()
messages=self.messages, + tools=self.format_handler.get_available_tools( + self.tool_manager, self.config + ), + extra_headers={"User-Agent": get_user_agent()}, + ) + + self.stats.context_tokens = actual_context_tokens + + self._reset_session() + await self.interaction_logger.save_interaction( + self.messages, self.stats, self.config, self.tool_manager + ) + + self.middleware_pipeline.reset(reset_reason=ResetReason.COMPACT) + + return summary_content or "" + + except Exception: + await self.interaction_logger.save_interaction( + self.messages, self.stats, self.config, self.tool_manager + ) + raise + + async def reload_with_initial_messages( + self, + config: VibeConfig | None = None, + max_turns: int | None = None, + max_price: float | None = None, + ) -> None: + await self.interaction_logger.save_interaction( + self.messages, self.stats, self.config, self.tool_manager + ) + + preserved_messages = self.messages[1:] if len(self.messages) > 1 else [] + old_system_prompt = self.messages[0].content if len(self.messages) > 0 else "" + + if config is not None: + self.config = config + self.backend = self.backend_factory() + + self.tool_manager = ToolManager(self.config) + + new_system_prompt = get_universal_system_prompt(self.tool_manager, self.config) + self.messages = [LLMMessage(role=Role.system, content=new_system_prompt)] + did_system_prompt_change = old_system_prompt != new_system_prompt + + if preserved_messages: + self.messages.extend(preserved_messages) + + if len(self.messages) == 1 or did_system_prompt_change: + self.stats.reset_context_state() + + try: + active_model = self.config.get_active_model() + self.stats.update_pricing( + active_model.input_price, active_model.output_price + ) + except ValueError: + pass + + self._last_observed_message_index = 0 + + self._setup_middleware(max_turns, max_price) + + if self.message_observer: + for msg in self.messages: + self.message_observer(msg) + self._last_observed_message_index = len(self.messages) + + 
self.tool_manager.reset_all() + + await self.interaction_logger.save_interaction( + self.messages, self.stats, self.config, self.tool_manager + ) diff --git a/vibe/core/autocompletion/__init__.py b/vibe/core/autocompletion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/core/autocompletion/completers.py b/vibe/core/autocompletion/completers.py new file mode 100644 index 0000000..0d7ff19 --- /dev/null +++ b/vibe/core/autocompletion/completers.py @@ -0,0 +1,247 @@ +from __future__ import annotations + +from pathlib import Path +from typing import NamedTuple + +from vibe.core.autocompletion.file_indexer import FileIndexer, IndexEntry +from vibe.core.autocompletion.fuzzy import fuzzy_match + +DEFAULT_MAX_ENTRIES_TO_PROCESS = 32000 +DEFAULT_TARGET_MATCHES = 100 + + +class Completer: + def get_completions(self, text: str, cursor_pos: int) -> list[str]: + return [] + + def get_completion_items(self, text: str, cursor_pos: int) -> list[tuple[str, str]]: + return [ + (completion, "") for completion in self.get_completions(text, cursor_pos) + ] + + def get_replacement_range( + self, text: str, cursor_pos: int + ) -> tuple[int, int] | None: + return None + + +class CommandCompleter(Completer): + def __init__(self, commands: list[tuple[str, str]]) -> None: + aliases_with_descriptions: dict[str, str] = {} + for alias, description in commands: + aliases_with_descriptions[alias] = description + + self._descriptions = aliases_with_descriptions + self._aliases: list[str] = list(aliases_with_descriptions.keys()) + + def get_completions(self, text: str, cursor_pos: int) -> list[str]: + if not text.startswith("/"): + return [] + + word = text[1:cursor_pos].lower() + search_str = "/" + word + return [ + alias for alias in self._aliases if alias.lower().startswith(search_str) + ] + + def get_completion_items(self, text: str, cursor_pos: int) -> list[tuple[str, str]]: + completions = self.get_completions(text, cursor_pos) + return [(alias, 
self._descriptions.get(alias, "")) for alias in completions] + + def get_replacement_range( + self, text: str, cursor_pos: int + ) -> tuple[int, int] | None: + if text.startswith("/"): + return (0, cursor_pos) + return None + + +class PathCompleter(Completer): + def __init__( + self, + max_entries_to_process: int = DEFAULT_MAX_ENTRIES_TO_PROCESS, + target_matches: int = DEFAULT_TARGET_MATCHES, + ) -> None: + self._indexer = FileIndexer() + self._max_entries_to_process = max_entries_to_process + self._target_matches = target_matches + + class _SearchContext(NamedTuple): + suffix: str + search_pattern: str + path_prefix: str + immediate_only: bool + + def _extract_partial(self, before_cursor: str) -> str | None: + if "@" not in before_cursor: + return None + + at_index = before_cursor.rfind("@") + fragment = before_cursor[at_index + 1 :] + + if " " in fragment: + return None + + return fragment + + def _build_search_context(self, partial_path: str) -> _SearchContext: + suffix = partial_path.split("/")[-1] + + if not partial_path: + # "@" => show top-level dir and files + return self._SearchContext( + search_pattern="", path_prefix="", suffix=suffix, immediate_only=True + ) + + if partial_path.endswith("/"): + # "@something/" => list immediate children + return self._SearchContext( + search_pattern="", + path_prefix=partial_path, + suffix=suffix, + immediate_only=True, + ) + + return self._SearchContext( + # => run fuzzy search across the index + search_pattern=partial_path, + path_prefix="", + suffix=suffix, + immediate_only=False, + ) + + def _matches_prefix(self, entry: IndexEntry, context: _SearchContext) -> bool: + path_str = entry.rel + + if context.path_prefix: + prefix_without_slash = context.path_prefix.rstrip("/") + prefix_with_slash = f"{prefix_without_slash}/" + + if path_str == prefix_without_slash and entry.is_dir: + # do not suggest the dir itself (e.g. 
"@src/" => don't suggest "@src/") + return False + + if path_str.startswith(prefix_with_slash): + after_prefix = path_str[len(prefix_with_slash) :] + else: + idx = path_str.find(prefix_with_slash) + if idx == -1 or (idx > 0 and path_str[idx - 1] != "/"): + return False + after_prefix = path_str[idx + len(prefix_with_slash) :] + + # only suggest files/dirs that are immediate children of the prefix + return bool(after_prefix) and "/" not in after_prefix + + if context.immediate_only and "/" in path_str: + # when user just typed "@", only show top-level entries + return False + + # entry matches the prefix: let the fuzzy matcher decide if it's a good match + return True + + def _is_visible(self, entry: IndexEntry, context: _SearchContext) -> bool: + return not (entry.name.startswith(".") and not context.suffix.startswith(".")) + + def _format_label(self, entry: IndexEntry) -> str: + suffix = "/" if entry.is_dir else "" + return f"@{entry.rel}{suffix}" + + def _score_matches( + self, entries: list[IndexEntry], context: _SearchContext + ) -> list[tuple[str, float]]: + scored_matches: list[tuple[str, float]] = [] + MAX_MATCHES = 50 + + for i, entry in enumerate(entries): + if i >= self._max_entries_to_process: + break + + if not self._matches_prefix(entry, context): + continue + + if not self._is_visible(entry, context): + continue + + label = self._format_label(entry) + + if not context.search_pattern: + scored_matches.append((label, 0.0)) + if len(scored_matches) >= self._target_matches: + break + continue + + match_result = fuzzy_match( + context.search_pattern, entry.rel, entry.rel_lower + ) + if match_result.matched: + scored_matches.append((label, match_result.score)) + if ( + len(scored_matches) >= self._target_matches + and match_result.score > MAX_MATCHES + ): + break + + scored_matches.sort(key=lambda x: (-x[1], x[0])) + return scored_matches + + def _collect_matches(self, text: str, cursor_pos: int) -> list[str]: + before_cursor = text[:cursor_pos] + 
partial_path = self._extract_partial(before_cursor) + if partial_path is None: + return [] + + context = self._build_search_context(partial_path) + + try: + # TODO (Vince): doing the assumption that "." is the root directory... Reliable? + file_index = self._indexer.get_index(Path(".")) + except (OSError, RuntimeError): + return [] + + scored_matches = self._score_matches(file_index, context) + return [path for path, _ in scored_matches] + + def get_completions(self, text: str, cursor_pos: int) -> list[str]: + return self._collect_matches(text, cursor_pos) + + def get_completion_items(self, text: str, cursor_pos: int) -> list[tuple[str, str]]: + matches = self._collect_matches(text, cursor_pos) + return [(completion, "") for completion in matches] + + def get_replacement_range( + self, text: str, cursor_pos: int + ) -> tuple[int, int] | None: + before_cursor = text[:cursor_pos] + if "@" in before_cursor: + at_index = before_cursor.rfind("@") + return (at_index, cursor_pos) + return None + + +class MultiCompleter(Completer): + def __init__(self, completers: list[Completer]) -> None: + self.completers = completers + + def get_completions(self, text: str, cursor_pos: int) -> list[str]: + all_completions = [] + for completer in self.completers: + completions = completer.get_completions(text, cursor_pos) + all_completions.extend(completions) + + seen = set() + unique = [] + for comp in all_completions: + if comp not in seen: + seen.add(comp) + unique.append(comp) + + return unique + + def get_replacement_range( + self, text: str, cursor_pos: int + ) -> tuple[int, int] | None: + for completer in self.completers: + range_result = completer.get_replacement_range(text, cursor_pos) + if range_result is not None: + return range_result + return None diff --git a/vibe/core/autocompletion/file_indexer/__init__.py b/vibe/core/autocompletion/file_indexer/__init__.py new file mode 100644 index 0000000..0df860c --- /dev/null +++ b/vibe/core/autocompletion/file_indexer/__init__.py @@ 
-0,0 +1,10 @@ +from __future__ import annotations + +from vibe.core.autocompletion.file_indexer.indexer import FileIndexer +from vibe.core.autocompletion.file_indexer.store import ( + FileIndexStats, + FileIndexStore, + IndexEntry, +) + +__all__ = ["FileIndexStats", "FileIndexStore", "FileIndexer", "IndexEntry"] diff --git a/vibe/core/autocompletion/file_indexer/ignore_rules.py b/vibe/core/autocompletion/file_indexer/ignore_rules.py new file mode 100644 index 0000000..19eebe7 --- /dev/null +++ b/vibe/core/autocompletion/file_indexer/ignore_rules.py @@ -0,0 +1,156 @@ +from __future__ import annotations + +from dataclasses import dataclass +import fnmatch +from pathlib import Path + +DEFAULT_IGNORE_PATTERNS: list[tuple[str, bool]] = [ + (".git/", True), + ("__pycache__/", True), + ("node_modules/", True), + (".DS_Store", True), + ("*.pyc", True), + ("*.log", True), + (".vscode/", True), + (".idea/", True), + ("/build/", True), + ("dist/", True), + ("target/", True), + (".next/", True), + (".nuxt/", True), + ("coverage/", True), + (".nyc_output/", True), + ("*.egg-info", True), + (".pytest_cache/", True), + (".tox/", True), + ("vendor/", True), + ("third_party/", True), + ("deps/", True), + ("*.min.js", True), + ("*.min.css", True), + ("*.bundle.js", True), + ("*.chunk.js", True), + (".cache/", True), + ("tmp/", True), + ("temp/", True), + ("logs/", True), + (".uv-cache/", True), + (".ruff_cache/", True), + (".venv/", True), + ("venv/", True), + (".mypy_cache/", True), + ("htmlcov/", True), + (".coverage", True), +] + + +@dataclass(slots=True) +class CompiledPattern: + raw: str + stripped: str + is_exclude: bool + dir_only: bool + name_only: bool + anchor_root: bool + + +class IgnoreRules: + def __init__(self, defaults: list[tuple[str, bool]] | None = None) -> None: + self._defaults = defaults or DEFAULT_IGNORE_PATTERNS + self._patterns: list[CompiledPattern] | None = None + self._root: Path | None = None + + def ensure_for_root(self, root: Path) -> None: + 
resolved_root = root.resolve() + if self._patterns is None or self._root != resolved_root: + self._patterns = self._build_patterns(resolved_root) + self._root = resolved_root + + def should_ignore(self, rel_str: str, name: str, is_dir: bool) -> bool: + if not self._patterns: + return False + + ignored = False + for pattern in self._patterns: + if self._matches(rel_str, name, is_dir, pattern): + ignored = pattern.is_exclude + return ignored + + def reset(self) -> None: + self._patterns = None + self._root = None + + def _build_patterns(self, root: Path) -> list[CompiledPattern]: + patterns: list[CompiledPattern] = [] + for raw, is_exclude in self._defaults: + anchor_root = raw.startswith("/") + if anchor_root: + raw = raw[1:] + + stripped = raw.rstrip("/") + patterns.append( + CompiledPattern( + raw=raw, + stripped=stripped, + is_exclude=is_exclude, + dir_only=raw.endswith("/"), + name_only="/" not in stripped, + anchor_root=anchor_root, + ) + ) + + gitignore_path = root / ".gitignore" + if gitignore_path.exists(): + try: + text = gitignore_path.read_text(encoding="utf-8") + except Exception: + return patterns + + for line in text.splitlines(): + raw = line.strip() + if not raw or raw.startswith("#"): + continue + + if "#" in raw: + raw = raw.split("#", 1)[0].rstrip() + if not raw: + continue + + is_exclude = not raw.startswith("!") + if not is_exclude: + raw = raw[1:].lstrip() + if not raw: + continue + + anchor_root = raw.startswith("/") + if anchor_root: + raw = raw[1:] + + stripped = raw.rstrip("/") + patterns.append( + CompiledPattern( + raw=raw, + stripped=stripped, + is_exclude=is_exclude, + dir_only=raw.endswith("/"), + name_only="/" not in stripped, + anchor_root=anchor_root, + ) + ) + + return patterns + + def _matches( + self, rel_str: str, name: str, is_dir: bool, pattern: CompiledPattern + ) -> bool: + if pattern.name_only: + if pattern.anchor_root and "/" in rel_str: + return False + target = name + else: + target = rel_str + + if not 
fnmatch.fnmatch(target, pattern.stripped): + return False + + return not pattern.dir_only or is_dir diff --git a/vibe/core/autocompletion/file_indexer/indexer.py b/vibe/core/autocompletion/file_indexer/indexer.py new file mode 100644 index 0000000..55015c3 --- /dev/null +++ b/vibe/core/autocompletion/file_indexer/indexer.py @@ -0,0 +1,176 @@ +from __future__ import annotations + +from collections.abc import Iterable +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from pathlib import Path +from threading import Event, RLock + +from vibe.core.autocompletion.file_indexer.ignore_rules import IgnoreRules +from vibe.core.autocompletion.file_indexer.store import ( + FileIndexStats, + FileIndexStore, + IndexEntry, +) +from vibe.core.autocompletion.file_indexer.watcher import Change, WatchController + + +@dataclass(slots=True) +class _RebuildTask: + cancel_event: Event + done_event: Event + + +class FileIndexer: + def __init__(self, mass_change_threshold: int = 200) -> None: + self._lock = RLock() # guards _store snapshot access and watcher callbacks. + self._stats = FileIndexStats() + self._ignore_rules = IgnoreRules() + self._store = FileIndexStore( + self._ignore_rules, self._stats, mass_change_threshold=mass_change_threshold + ) + self._watcher = WatchController(self._handle_watch_changes) + self._rebuild_executor = ThreadPoolExecutor( + max_workers=1, thread_name_prefix="file-indexer" + ) + self._active_rebuilds: dict[Path, _RebuildTask] = {} + self._rebuild_lock = ( + RLock() + ) # coordinates updates to _active_rebuilds and _target_root. 
+ self._target_root: Path | None = None + self._shutdown = False + + @property + def stats(self) -> FileIndexStats: + return self._stats + + def get_index(self, root: Path) -> list[IndexEntry]: + resolved_root = root.resolve() + + with self._lock: # read current root without blocking rebuild bookkeeping + root_changed = ( + self._store.root is not None and self._store.root != resolved_root + ) + + if root_changed: + self._watcher.stop() + with self._rebuild_lock: # cancel rebuilds targeting other roots + self._target_root = resolved_root + for other_root, task in self._active_rebuilds.items(): + if other_root != resolved_root: + task.cancel_event.set() + task.done_event.set() + self._active_rebuilds.pop(other_root, None) + + with self._lock: + needs_rebuild = self._store.root != resolved_root + + if needs_rebuild: + with self._rebuild_lock: + self._target_root = resolved_root + self._start_background_rebuild(resolved_root) + self._wait_for_rebuild(resolved_root) + + self._watcher.start(resolved_root) + + with self._lock: # ensure root reference is fresh before snapshotting + return self._store.snapshot() + + def refresh(self) -> None: + self._watcher.stop() + with self._rebuild_lock: + for task in self._active_rebuilds.values(): + task.cancel_event.set() + task.done_event.set() + self._active_rebuilds.clear() + self._target_root = None + with self._lock: + self._store.clear() + self._ignore_rules.reset() + + def shutdown(self) -> None: + if self._shutdown: + return + self._shutdown = True + self.refresh() + self._rebuild_executor.shutdown(wait=True) + + def __del__(self) -> None: + if not self._shutdown: + try: + self.shutdown() + except Exception: + pass + + def _start_background_rebuild(self, root: Path) -> None: + with self._rebuild_lock: # one rebuild per root + if root in self._active_rebuilds: + return + + cancel_event = Event() + done_event = Event() + self._active_rebuilds[root] = _RebuildTask( + cancel_event=cancel_event, done_event=done_event + ) + + try: 
+ self._rebuild_executor.submit( + self._rebuild_worker, root, self._active_rebuilds[root] + ) + except RuntimeError: + with self._rebuild_lock: + self._active_rebuilds.pop(root, None) + done_event.set() + + def _rebuild_worker(self, root: Path, task: _RebuildTask) -> None: + try: + if task.cancel_event.is_set(): # cancelled before work began + with self._rebuild_lock: + self._active_rebuilds.pop(root, None) + return + + with self._rebuild_lock: # bail if another root took ownership + if self._target_root != root: + self._active_rebuilds.pop(root, None) + return + + with self._lock: # exclusive access while rebuilding the store + if task.cancel_event.is_set(): + with self._rebuild_lock: + self._active_rebuilds.pop(root, None) + return + + self._store.rebuild( + root, should_cancel=lambda: task.cancel_event.is_set() + ) + + with self._rebuild_lock: + self._active_rebuilds.pop(root, None) + except Exception: + with self._rebuild_lock: + self._active_rebuilds.pop(root, None) + finally: + task.done_event.set() + + def _wait_for_rebuild(self, root: Path) -> None: + with self._rebuild_lock: + task = self._active_rebuilds.get(root) + if task: + task.done_event.wait() + + def _handle_watch_changes( + self, root: Path, raw_changes: Iterable[tuple[Change, str]] + ) -> None: + normalized: list[tuple[Change, Path]] = [] + for change, path_str in raw_changes: + if change not in {Change.added, Change.deleted, Change.modified}: + continue + normalized.append((change, Path(path_str).resolve())) + + if not normalized: + return + + with self._lock: # make watcher ignore stale roots + if self._store.root != root: + return + self._store.apply_changes(normalized) diff --git a/vibe/core/autocompletion/file_indexer/store.py b/vibe/core/autocompletion/file_indexer/store.py new file mode 100644 index 0000000..8efc47d --- /dev/null +++ b/vibe/core/autocompletion/file_indexer/store.py @@ -0,0 +1,169 @@ +from __future__ import annotations + +from collections.abc import Callable +from 
dataclasses import dataclass +import os +from pathlib import Path + +from vibe.core.autocompletion.file_indexer.ignore_rules import IgnoreRules +from vibe.core.autocompletion.file_indexer.watcher import Change + + +@dataclass(slots=True) +class FileIndexStats: + rebuilds: int = 0 + incremental_updates: int = 0 + + +@dataclass(slots=True) +class IndexEntry: + rel: str + rel_lower: str + name: str + path: Path + is_dir: bool + + +class FileIndexStore: + def __init__( + self, + ignore_rules: IgnoreRules, + stats: FileIndexStats, + mass_change_threshold: int = 200, + ) -> None: + self._ignore_rules = ignore_rules + self._stats = stats + self._mass_change_threshold = mass_change_threshold + self._entries_by_rel: dict[str, IndexEntry] = {} + self._ordered_entries: list[IndexEntry] | None = None + self._root: Path | None = None + + @property + def root(self) -> Path | None: + return self._root + + def clear(self) -> None: + self._entries_by_rel.clear() + self._ordered_entries = None + self._root = None + + def rebuild( + self, root: Path, should_cancel: Callable[[], bool] | None = None + ) -> None: + resolved_root = root.resolve() + self._ignore_rules.ensure_for_root(resolved_root) + entries = self._walk_directory(resolved_root, cancel_check=should_cancel) + self._entries_by_rel = {entry.rel: entry for entry in entries} + self._ordered_entries = entries + self._root = resolved_root + self._stats.rebuilds += 1 + + def snapshot(self) -> list[IndexEntry]: + if not self._entries_by_rel: + return [] + + if self._ordered_entries is None: + self._ordered_entries = sorted( + self._entries_by_rel.values(), key=lambda entry: entry.rel + ) + + return list(self._ordered_entries) + + def apply_changes(self, changes: list[tuple[Change, Path]]) -> None: + if self._root is None: + return + + if len(changes) > self._mass_change_threshold: + self.rebuild(self._root) + return + + modified = False + for change, path in changes: + try: + rel_str = path.relative_to(self._root).as_posix() + 
except ValueError: + continue + + if not rel_str: + continue + + if change is Change.deleted: + if self._remove_entry(rel_str): + modified = True + continue + + if not path.exists(): + continue + + if path.is_dir(): + dir_entry = self._create_entry(rel_str, path.name, path, True) + if dir_entry: + self._entries_by_rel[rel_str] = dir_entry + modified = True + for entry in self._walk_directory(path, rel_str): + self._entries_by_rel[entry.rel] = entry + modified = True + else: + file_entry = self._create_entry(rel_str, path.name, path, False) + if file_entry: + self._entries_by_rel[file_entry.rel] = file_entry + modified = True + + if modified: + self._ordered_entries = None + self._stats.incremental_updates += 1 + + def _create_entry( + self, rel_str: str, name: str, path: Path, is_dir: bool + ) -> IndexEntry | None: + if self._ignore_rules.should_ignore(rel_str, name, is_dir): + return None + return IndexEntry( + rel=rel_str, rel_lower=rel_str.lower(), name=name, path=path, is_dir=is_dir + ) + + def _walk_directory( + self, + directory: Path, + rel_prefix: str = "", + cancel_check: Callable[[], bool] | None = None, + ) -> list[IndexEntry]: + results: list[IndexEntry] = [] + try: + with os.scandir(directory) as iterator: + for entry in iterator: + if cancel_check and cancel_check(): + break + + is_dir = entry.is_dir(follow_symlinks=False) + name = entry.name + rel_str = f"{rel_prefix}/{name}" if rel_prefix else name + path = Path(entry.path) + + index_entry = self._create_entry(rel_str, name, path, is_dir) + if not index_entry: + continue + + results.append(index_entry) + + if is_dir: + results.extend( + self._walk_directory(path, rel_str, cancel_check) + ) + except (PermissionError, OSError): + pass + + return results + + def _remove_entry(self, rel_str: str) -> bool: + entry = self._entries_by_rel.pop(rel_str, None) + if not entry: + return False + + if entry.is_dir: + prefix = f"{rel_str}/" + to_remove = [key for key in self._entries_by_rel if 
key.startswith(prefix)] + for key in to_remove: + self._entries_by_rel.pop(key, None) + + return True diff --git a/vibe/core/autocompletion/file_indexer/watcher.py b/vibe/core/autocompletion/file_indexer/watcher.py new file mode 100644 index 0000000..0eddd56 --- /dev/null +++ b/vibe/core/autocompletion/file_indexer/watcher.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +from collections.abc import Callable, Iterable +from pathlib import Path +from threading import Event, Thread + +from watchfiles import Change, watch + + +class WatchController: + def __init__( + self, on_changes: Callable[[Path, Iterable[tuple[Change, str]]], None] + ) -> None: + self._on_changes = on_changes + self._thread: Thread | None = None + self._stop_event: Event | None = None + self._ready_event: Event | None = None + self._root: Path | None = None + + def start(self, root: Path) -> None: + resolved_root = root.resolve() + if self._thread and self._thread.is_alive() and self._root == resolved_root: + return + + self.stop() + + stop_event = Event() + ready_event = Event() + thread = Thread( + target=self._watch_loop, + args=(resolved_root, stop_event, ready_event), + name="file-indexer-watch", + daemon=True, + ) + + self._thread = thread + self._stop_event = stop_event + self._ready_event = ready_event + self._root = resolved_root + + thread.start() + ready_event.wait(timeout=0.5) + + def stop(self) -> None: + thread = self._thread + if self._stop_event: + self._stop_event.set() + self._thread = None + self._stop_event = None + self._ready_event = None + self._root = None + + if thread and thread.is_alive(): + thread.join(timeout=1) + + def _watch_loop(self, root: Path, stop_event: Event, ready_event: Event) -> None: + try: + watcher = watch( + str(root), stop_event=stop_event, step=200, yield_on_timeout=True + ) + ready_event.set() + for changes in watcher: + if not ready_event.is_set(): + ready_event.set() + if stop_event.is_set(): + break + if not changes: + continue + 
self._on_changes(root, changes) + except Exception: + ready_event.set() diff --git a/vibe/core/autocompletion/fuzzy.py b/vibe/core/autocompletion/fuzzy.py new file mode 100644 index 0000000..149dbf9 --- /dev/null +++ b/vibe/core/autocompletion/fuzzy.py @@ -0,0 +1,189 @@ +from __future__ import annotations + +from dataclasses import dataclass + +PREFIX_MULTIPLIER = 2.0 +WORD_BOUNDARY_MULTIPLIER = 1.8 +CONSECUTIVE_MULTIPLIER = 1.3 + + +@dataclass(frozen=True) +class MatchResult: + matched: bool + score: float + matched_indices: tuple[int, ...] + + +def fuzzy_match(pattern: str, text: str, text_lower: str | None = None) -> MatchResult: + if not pattern: + return MatchResult(matched=True, score=0.0, matched_indices=()) + + if text_lower is None: + text_lower = text.lower() + return _find_best_match(pattern, pattern.lower(), text_lower, text) + + +def _find_best_match( + pattern_original: str, pattern_lower: str, text_lower: str, text_original: str +) -> MatchResult: + if len(pattern_lower) > len(text_lower): + return MatchResult(matched=False, score=0.0, matched_indices=()) + + if text_lower.startswith(pattern_lower): + indices = tuple(range(len(pattern_lower))) + score = _calculate_score( + pattern_original, pattern_lower, text_lower, indices, text_original + ) + return MatchResult( + matched=True, score=score * PREFIX_MULTIPLIER, matched_indices=indices + ) + + best_score = -1.0 + best_indices: tuple[int, ...] 
= () + + for matcher in ( + _try_word_boundary_match, + _try_consecutive_match, + _try_subsequence_match, + ): + match = matcher(pattern_original, pattern_lower, text_lower, text_original) + if match.matched and match.score > best_score: + best_score = match.score + best_indices = match.matched_indices + + if best_score >= 0: + return MatchResult(matched=True, score=best_score, matched_indices=best_indices) + + return MatchResult(matched=False, score=0.0, matched_indices=()) + + +def _try_word_boundary_match( + pattern_original: str, pattern: str, text_lower: str, text_original: str +) -> MatchResult: + indices: list[int] = [] + pattern_idx = 0 + + for i, char in enumerate(text_lower): + if pattern_idx >= len(pattern): + break + + is_boundary = ( + i == 0 + or text_lower[i - 1] in "/-_." + or (text_original[i].isupper() and not text_original[i - 1].isupper()) + ) + + if char == pattern[pattern_idx]: + if is_boundary or (indices and i == indices[-1] + 1) or not indices: + indices.append(i) + pattern_idx += 1 + + if pattern_idx == len(pattern): + score = _calculate_score( + pattern_original, pattern, text_lower, tuple(indices), text_original + ) + return MatchResult( + matched=True, + score=score * WORD_BOUNDARY_MULTIPLIER, + matched_indices=tuple(indices), + ) + + return MatchResult(matched=False, score=0.0, matched_indices=()) + + +def _try_consecutive_match( + pattern_original: str, pattern: str, text_lower: str, text_original: str +) -> MatchResult: + indices: list[int] = [] + pattern_idx = 0 + + for i, char in enumerate(text_lower): + if pattern_idx >= len(pattern): + break + + if char == pattern[pattern_idx]: + indices.append(i) + pattern_idx += 1 + elif indices: + indices.clear() + pattern_idx = 0 + + if pattern_idx == len(pattern): + score = _calculate_score( + pattern_original, pattern, text_lower, tuple(indices), text_original + ) + return MatchResult( + matched=True, + score=score * CONSECUTIVE_MULTIPLIER, + matched_indices=tuple(indices), + ) + + return 
MatchResult(matched=False, score=0.0, matched_indices=()) + + +def _try_subsequence_match( + pattern_original: str, pattern: str, text_lower: str, text_original: str +) -> MatchResult: + indices: list[int] = [] + pattern_idx = 0 + + for i, char in enumerate(text_lower): + if pattern_idx >= len(pattern): + break + if char == pattern[pattern_idx]: + indices.append(i) + pattern_idx += 1 + + if pattern_idx == len(pattern): + score = _calculate_score( + pattern_original, pattern, text_lower, tuple(indices), text_original + ) + return MatchResult(matched=True, score=score, matched_indices=tuple(indices)) + + return MatchResult(matched=False, score=0.0, matched_indices=()) + + +def _calculate_score( + pattern_original: str, + pattern: str, + text_lower: str, + indices: tuple[int, ...], + text_original: str, +) -> float: + if not indices: + return 0.0 + + base_score = 100.0 + if indices[0] == 0: + base_score += 50.0 + else: + base_score -= indices[0] * 2 + + consecutive_bonus = sum( + 10.0 for i in range(len(indices) - 1) if indices[i + 1] == indices[i] + 1 + ) + + boundary_bonus = 0.0 + for idx in indices: + if idx == 0 or text_lower[idx - 1] in "/-_.": + boundary_bonus += 5.0 + elif text_original[idx].isupper() and ( + idx == 0 or not text_original[idx - 1].isupper() + ): + boundary_bonus += 3.0 + + case_bonus = sum( + 2.0 + for i, text_idx in enumerate(indices) + if i < len(pattern_original) + and text_idx < len(text_original) + and pattern_original[i] == text_original[text_idx] + ) + + gap_penalty = sum( + (indices[i + 1] - indices[i] - 1) * 1.5 for i in range(len(indices) - 1) + ) + + return max( + 0.0, base_score + consecutive_bonus + boundary_bonus + case_bonus - gap_penalty + ) diff --git a/vibe/core/autocompletion/path_prompt.py b/vibe/core/autocompletion/path_prompt.py new file mode 100644 index 0000000..49880e3 --- /dev/null +++ b/vibe/core/autocompletion/path_prompt.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +from dataclasses import dataclass 
+from pathlib import Path +from typing import Literal + + +@dataclass(frozen=True, slots=True) +class PathResource: + path: Path + alias: str + kind: Literal["file", "directory"] + + +@dataclass(frozen=True, slots=True) +class PathPromptPayload: + display_text: str + prompt_text: str + resources: list[PathResource] + + +def build_path_prompt_payload( + message: str, *, base_dir: Path | None = None +) -> PathPromptPayload: + if not message: + return PathPromptPayload(message, message, []) + + resolved_base = (base_dir or Path.cwd()).resolve() + prompt_parts: list[str] = [] + resources: list[PathResource] = [] + pos = 0 + + while pos < len(message): + if _is_path_anchor(message, pos): + candidate, new_pos = _extract_candidate(message, pos + 1) + if candidate and (resource := _to_resource(candidate, resolved_base)): + resources.append(resource) + prompt_parts.append(candidate) + pos = new_pos + continue + + prompt_parts.append(message[pos]) + pos += 1 + + prompt_text = "".join(prompt_parts) + unique_resources = _dedupe_resources(resources) + return PathPromptPayload(message, prompt_text, unique_resources) + + +def _is_path_anchor(message: str, pos: int) -> bool: + if message[pos] != "@": + return False + if pos == 0: + return True + return not (message[pos - 1].isalnum() or message[pos - 1] == "_") + + +def _extract_candidate(message: str, start: int) -> tuple[str | None, int]: + if start >= len(message): + return None, start + + quote = message[start] + if quote in {"'", '"'}: + end_quote = message.find(quote, start + 1) + if end_quote == -1: + return None, start + return message[start + 1 : end_quote], end_quote + 1 + + end = start + while end < len(message) and _is_path_char(message[end]): + end += 1 + + if end == start: + return None, start + + return message[start:end], end + + +def _is_path_char(char: str) -> bool: + return char.isalnum() or char in "._/\\-()[]{}" + + +def _to_resource(candidate: str, base_dir: Path) -> PathResource | None: + if not candidate: + 
return None + + candidate_path = Path(candidate) + resolved = ( + candidate_path if candidate_path.is_absolute() else base_dir / candidate_path + ) + resolved = resolved.resolve() + + if not resolved.exists(): + return None + + kind = "directory" if resolved.is_dir() else "file" + return PathResource(path=resolved, alias=candidate, kind=kind) + + +def _dedupe_resources(resources: list[PathResource]) -> list[PathResource]: + seen: set[Path] = set() + unique: list[PathResource] = [] + for resource in resources: + if resource.path in seen: + continue + seen.add(resource.path) + unique.append(resource) + return unique diff --git a/vibe/core/autocompletion/path_prompt_adapter.py b/vibe/core/autocompletion/path_prompt_adapter.py new file mode 100644 index 0000000..de2f163 --- /dev/null +++ b/vibe/core/autocompletion/path_prompt_adapter.py @@ -0,0 +1,149 @@ +from __future__ import annotations + +from collections.abc import Sequence +import mimetypes +from pathlib import Path + +from vibe.core.autocompletion.path_prompt import ( + PathPromptPayload, + PathResource, + build_path_prompt_payload, +) + +DEFAULT_MAX_EMBED_BYTES = 256 * 1024 + +ResourceBlock = dict[str, str | None] + + +def render_path_prompt( + message: str, + *, + base_dir: Path, + max_embed_bytes: int | None = DEFAULT_MAX_EMBED_BYTES, +) -> str: + payload = build_path_prompt_payload(message, base_dir=base_dir) + blocks = _path_prompt_to_content_blocks(payload, max_embed_bytes=max_embed_bytes) + return _content_blocks_to_prompt_text(blocks) + + +def _path_prompt_to_content_blocks( + payload: PathPromptPayload, *, max_embed_bytes: int | None = DEFAULT_MAX_EMBED_BYTES +) -> list[ResourceBlock]: + blocks: list[ResourceBlock] = [{"type": "text", "text": payload.prompt_text}] + + for resource in payload.resources: + match resource.kind: + case "file": + embedded = _try_embed_text_resource(resource, max_embed_bytes) + if embedded: + blocks.append(embedded) + else: + blocks.append({ + "type": "resource_link", + 
"uri": resource.path.as_uri(), + "name": resource.alias, + }) + case "directory": + blocks.append({ + "type": "resource_link", + "uri": resource.path.as_uri(), + "name": resource.alias, + }) + + return blocks + + +def _try_embed_text_resource( + resource: PathResource, max_embed_bytes: int | None +) -> ResourceBlock | None: + try: + data = resource.path.read_bytes() + except OSError: + return None + + if max_embed_bytes is not None and len(data) > max_embed_bytes: + return None + + if not _is_probably_text(resource, data): + return None + + try: + text = data.decode("utf-8") + except UnicodeDecodeError: + return None + + return {"type": "resource", "uri": resource.path.as_uri(), "text": text} + + +def _content_blocks_to_prompt_text(blocks: Sequence[ResourceBlock]) -> str: + parts = [] + + for block in blocks: + block_text = _format_content_block(block) + if block_text is not None: + parts.append(block_text) + + return "\n\n".join(parts) + + +def _format_content_block(block: ResourceBlock) -> str | None: + match block.get("type"): + case "text": + return block.get("text") or "" + + case "resource": + block_content = block.get("text") or "" + fence = "```" + return f"{block.get('uri')}\n{fence}\n{block_content}\n{fence}" + + case "resource_link": + fields = { + "uri": block.get("uri"), + "name": block.get("name"), + "title": block.get("title"), + "description": block.get("description"), + "mimeType": block.get("mimeType"), + "size": block.get("size"), + } + parts = [ + f"{k}: {v}" + for k, v in fields.items() + if v is not None and (v or isinstance(v, (int, float))) + ] + return "\n".join(parts) + + case _: + return None + + +BINARY_MIME_PREFIXES = ( + "audio/", + "image/", + "video/", + "application/zip", + "application/x-zip-compressed", +) + + +def _is_probably_text(path: PathResource, data: bytes) -> bool: + mime_guess, _ = mimetypes.guess_type(path.path.name) + if mime_guess and mime_guess.startswith(BINARY_MIME_PREFIXES): + return False + + if not data: + 
return True + if b"\x00" in data: + return False + + DEL_CODE = 127 + NON_PRINTABLE_MAX_PROPORTION = 0.1 + NON_PRINTABLE_MAX_CODE = 31 + NON_PRINTABLE_EXCEPTIONS = [9, 10, 11, 12] + non_text = sum( + 1 + for b in data + if b <= NON_PRINTABLE_MAX_CODE + and b not in NON_PRINTABLE_EXCEPTIONS + or b == DEL_CODE + ) + return (non_text / len(data)) < NON_PRINTABLE_MAX_PROPORTION diff --git a/vibe/core/config.py b/vibe/core/config.py new file mode 100644 index 0000000..891b2db --- /dev/null +++ b/vibe/core/config.py @@ -0,0 +1,566 @@ +from __future__ import annotations + +from enum import StrEnum, auto +import os +from pathlib import Path +import re +import shlex +import tomllib +from typing import Annotated, Any, Literal + +from dotenv import dotenv_values +from pydantic import BaseModel, Field, field_validator, model_validator +from pydantic.fields import FieldInfo +from pydantic_core import to_jsonable_python +from pydantic_settings import ( + BaseSettings, + PydanticBaseSettingsSource, + SettingsConfigDict, +) +import tomli_w + +from vibe.core.prompts import SystemPrompt +from vibe.core.tools.base import BaseToolConfig + + +def get_vibe_home() -> Path: + if vibe_home := os.getenv("VIBE_HOME"): + return Path(vibe_home).expanduser().resolve() + return Path.home() / ".vibe" + + +GLOBAL_CONFIG_DIR = get_vibe_home() +GLOBAL_CONFIG_FILE = GLOBAL_CONFIG_DIR / "config.toml" +GLOBAL_ENV_FILE = GLOBAL_CONFIG_DIR / ".env" + + +def resolve_config_file() -> Path: + for directory in (cwd := Path.cwd(), *cwd.parents): + if (candidate := directory / ".vibe" / "config.toml").is_file(): + return candidate + return GLOBAL_CONFIG_FILE + + +def load_api_keys_from_env() -> None: + if GLOBAL_ENV_FILE.is_file(): + env_vars = dotenv_values(GLOBAL_ENV_FILE) + for key, value in env_vars.items(): + if value: + os.environ.setdefault(key, value) + + +CONFIG_FILE = resolve_config_file() +CONFIG_DIR = CONFIG_FILE.parent +AGENT_DIR = CONFIG_DIR / "agents" +PROMPT_DIR = CONFIG_DIR / "prompts" 
+INSTRUCTIONS_FILE = CONFIG_DIR / "instructions.md" +HISTORY_FILE = CONFIG_DIR / "vibehistory" +PROJECT_DOC_FILENAMES = ["AGENTS.md", "VIBE.md", ".vibe.md"] + + +class MissingAPIKeyError(RuntimeError): + def __init__(self, env_key: str, provider_name: str) -> None: + super().__init__( + f"Missing {env_key} environment variable for {provider_name} provider" + ) + self.env_key = env_key + self.provider_name = provider_name + + +class MissingPromptFileError(RuntimeError): + def __init__(self, system_prompt_id: str, prompt_dir: str) -> None: + super().__init__( + f"Invalid system_prompt_id value: '{system_prompt_id}'. " + f"Must be one of the available prompts ({', '.join(f'{p.name.lower()}' for p in SystemPrompt)}), " + f"or correspond to a .md file in {prompt_dir}" + ) + self.system_prompt_id = system_prompt_id + self.prompt_dir = prompt_dir + + +class WrongBackendError(RuntimeError): + def __init__(self, backend: Backend, is_mistral_api: bool) -> None: + super().__init__( + f"Wrong backend '{backend}' for {'' if is_mistral_api else 'non-'}" + f"mistral API. Use '{Backend.MISTRAL}' for mistral API and '{Backend.GENERIC}' for others." 
+ ) + self.backend = backend + self.is_mistral_api = is_mistral_api + + +class TomlFileSettingsSource(PydanticBaseSettingsSource): + def __init__(self, settings_cls: type[BaseSettings]) -> None: + super().__init__(settings_cls) + self.toml_data = self._load_toml() + + def _load_toml(self) -> dict[str, Any]: + file = CONFIG_FILE + try: + with file.open("rb") as f: + return tomllib.load(f) + except FileNotFoundError: + return {} + except tomllib.TOMLDecodeError as e: + raise RuntimeError(f"Invalid TOML in {file}: {e}") from e + except OSError as e: + raise RuntimeError(f"Cannot read {file}: {e}") from e + + def get_field_value( + self, field: FieldInfo, field_name: str + ) -> tuple[Any, str, bool]: + return self.toml_data.get(field_name), field_name, False + + def __call__(self) -> dict[str, Any]: + return self.toml_data + + +class ProjectContextConfig(BaseSettings): + max_chars: int = 40_000 + default_commit_count: int = 5 + max_doc_bytes: int = 32 * 1024 + truncation_buffer: int = 1_000 + max_depth: int = 3 + max_files: int = 1000 + max_dirs_per_level: int = 20 + timeout_seconds: float = 2.0 + + +class SessionLoggingConfig(BaseSettings): + save_dir: str = "" + session_prefix: str = "session" + enabled: bool = True + + @field_validator("save_dir", mode="before") + @classmethod + def set_default_save_dir(cls, v: str) -> str: + if not v: + return str(get_vibe_home() / "logs" / "session") + return v + + @field_validator("save_dir", mode="after") + @classmethod + def expand_save_dir(cls, v: str) -> str: + return str(Path(v).expanduser().resolve()) + + +class Backend(StrEnum): + MISTRAL = auto() + GENERIC = auto() + + +class ProviderConfig(BaseModel): + name: str + api_base: str + api_key_env_var: str = "" + api_style: str = "openai" + backend: Backend = Backend.GENERIC + + +class _MCPBase(BaseModel): + name: str = Field(description="Short alias used to prefix tool names") + prompt: str | None = Field( + default=None, description="Optional usage hint appended to tool 
descriptions" + ) + + @field_validator("name", mode="after") + @classmethod + def normalize_name(cls, v: str) -> str: + normalized = re.sub(r"[^a-zA-Z0-9_-]", "_", v) + normalized = normalized.strip("_-") + return normalized[:256] + + +class _MCPHttpFields(BaseModel): + url: str = Field(description="Base URL of the MCP HTTP server") + headers: dict[str, str] = Field( + default_factory=dict, + description=( + "Additional HTTP headers when using 'http' transport (e.g., Authorization or X-API-Key)." + ), + ) + api_key_env: str = Field( + default="", + description=( + "Environment variable name containing an API token to send for HTTP transport." + ), + ) + api_key_header: str = Field( + default="Authorization", + description=( + "HTTP header name to carry the token when 'api_key_env' is set (e.g., 'Authorization' or 'X-API-Key')." + ), + ) + api_key_format: str = Field( + default="Bearer {token}", + description=( + "Format string for the header value when 'api_key_env' is set. Use '{token}' placeholder." 
+ ), + ) + + def http_headers(self) -> dict[str, str]: + hdrs = dict(self.headers or {}) + env_var = (self.api_key_env or "").strip() + if env_var and (token := os.getenv(env_var)): + target = (self.api_key_header or "").strip() or "Authorization" + if not any(h.lower() == target.lower() for h in hdrs): + try: + value = (self.api_key_format or "{token}").format(token=token) + except Exception: + value = token + hdrs[target] = value + return hdrs + + +class MCPHttp(_MCPBase, _MCPHttpFields): + transport: Literal["http"] + + +class MCPStreamableHttp(_MCPBase, _MCPHttpFields): + transport: Literal["streamable-http"] + + +class MCPStdio(_MCPBase): + transport: Literal["stdio"] + command: str | list[str] + args: list[str] = Field(default_factory=list) + + def argv(self) -> list[str]: + base = ( + shlex.split(self.command) + if isinstance(self.command, str) + else list(self.command or []) + ) + return [*base, *self.args] if self.args else base + + +MCPServer = Annotated[ + MCPHttp | MCPStreamableHttp | MCPStdio, Field(discriminator="transport") +] + + +class ModelConfig(BaseModel): + name: str + provider: str + alias: str + temperature: float = 0.2 + input_price: float = 0.0 # Price per million input tokens + output_price: float = 0.0 # Price per million output tokens + + @model_validator(mode="before") + @classmethod + def _default_alias_to_name(cls, data: Any) -> Any: + if isinstance(data, dict): + if "alias" not in data or data["alias"] is None: + data["alias"] = data.get("name") + return data + + +DEFAULT_PROVIDERS = [ + ProviderConfig( + name="mistral", + api_base="https://api.mistral.ai/v1", + api_key_env_var="MISTRAL_API_KEY", + backend=Backend.MISTRAL, + ), + ProviderConfig( + name="llamacpp", + api_base="http://127.0.0.1:8080/v1", + api_key_env_var="", # NOTE: if you wish to use --api-key in llama-server, change this value + ), +] + +DEFAULT_MODELS = [ + ModelConfig( + name="mistral-vibe-cli-latest", + provider="mistral", + alias="devstral-2", + input_price=0.4, 
+ output_price=2.0, + ), + ModelConfig( + name="devstral-small-latest", + provider="mistral", + alias="devstral-small", + input_price=0.1, + output_price=0.3, + ), + ModelConfig( + name="devstral", + provider="llamacpp", + alias="local", + input_price=0.0, + output_price=0.0, + ), +] + + +class VibeConfig(BaseSettings): + active_model: str = "devstral-2" + vim_keybindings: bool = False + disable_welcome_banner_animation: bool = False + displayed_workdir: str = "" + auto_compact_threshold: int = 100_000 + context_warnings: bool = False + textual_theme: str = "textual-dark" + instructions: str = "" + workdir: Path | None = Field(default=None, exclude=True) + system_prompt_id: str = "cli" + include_model_info: bool = True + include_project_context: bool = True + include_prompt_detail: bool = True + enable_update_checks: bool = True + api_timeout: float = 720.0 + providers: list[ProviderConfig] = Field( + default_factory=lambda: list(DEFAULT_PROVIDERS) + ) + models: list[ModelConfig] = Field(default_factory=lambda: list(DEFAULT_MODELS)) + + project_context: ProjectContextConfig = Field(default_factory=ProjectContextConfig) + session_logging: SessionLoggingConfig = Field(default_factory=SessionLoggingConfig) + tools: dict[str, BaseToolConfig] = Field(default_factory=dict) + tool_paths: list[str] = Field( + default_factory=list, + description=( + "Additional directories to search for custom tools. " + "Each path may be absolute or relative to the current working directory." + ), + ) + + mcp_servers: list[MCPServer] = Field( + default_factory=list, description="Preferred MCP server configuration entries." + ) + + enabled_tools: list[str] = Field( + default_factory=list, + description=( + "An explicit list of tool names/patterns to enable. If set, only these" + " tools will be active. Supports exact names, glob patterns (e.g.," + " 'serena_*'), and regex with 're:' prefix or regex-like patterns (e.g.," + " 're:^serena_.*' or 'serena.*')." 
+ ), + ) + disabled_tools: list[str] = Field( + default_factory=list, + description=( + "A list of tool names/patterns to disable. Ignored if 'enabled_tools'" + " is set. Supports exact names, glob patterns (e.g., 'bash*'), and" + " regex with 're:' prefix or regex-like patterns." + ), + ) + + model_config = SettingsConfigDict( + env_prefix="VIBE_", case_sensitive=False, extra="forbid" + ) + + @property + def effective_workdir(self) -> Path: + return self.workdir if self.workdir is not None else Path.cwd() + + @property + def system_prompt(self) -> str: + try: + return SystemPrompt[self.system_prompt_id.upper()].read() + except KeyError: + pass + + custom_sp_path = (PROMPT_DIR / self.system_prompt_id).with_suffix(".md") + if not custom_sp_path.is_file(): + raise MissingPromptFileError(self.system_prompt_id, str(PROMPT_DIR)) + return custom_sp_path.read_text() + + def get_active_model(self) -> ModelConfig: + for model in self.models: + if model.alias == self.active_model: + return model + raise ValueError( + f"Active model '{self.active_model}' not found in configuration." + ) + + def get_provider_for_model(self, model: ModelConfig) -> ProviderConfig: + for provider in self.providers: + if provider.name == model.provider: + return provider + raise ValueError( + f"Provider '{model.provider}' for model '{model.name}' not found in configuration." + ) + + @classmethod + def settings_customise_sources( + cls, + settings_cls: type[BaseSettings], + init_settings: PydanticBaseSettingsSource, + env_settings: PydanticBaseSettingsSource, + dotenv_settings: PydanticBaseSettingsSource, + file_secret_settings: PydanticBaseSettingsSource, + ) -> tuple[PydanticBaseSettingsSource, ...]: + """Define the priority of settings sources. + + Note: dotenv_settings is intentionally excluded. API keys and other + non-config environment variables are stored in .env but loaded manually + into os.environ for use by providers. 
Only VIBE_* prefixed environment + variables (via env_settings) and TOML config are used for Pydantic settings. + """ + return ( + init_settings, + env_settings, + TomlFileSettingsSource(settings_cls), + file_secret_settings, + ) + + @model_validator(mode="after") + def _check_api_key(self) -> VibeConfig: + try: + active_model = self.get_active_model() + provider = self.get_provider_for_model(active_model) + api_key_env = provider.api_key_env_var + if api_key_env and not os.getenv(api_key_env): + raise MissingAPIKeyError(api_key_env, provider.name) + except ValueError: + pass + return self + + @model_validator(mode="after") + def _check_api_backend_compatibility(self) -> VibeConfig: + try: + active_model = self.get_active_model() + provider = self.get_provider_for_model(active_model) + MISTRAL_API_BASES = [ + "https://codestral.mistral.ai", + "https://api.mistral.ai", + ] + is_mistral_api = any( + provider.api_base.startswith(api_base) for api_base in MISTRAL_API_BASES + ) + if (is_mistral_api and provider.backend != Backend.MISTRAL) or ( + not is_mistral_api and provider.backend != Backend.GENERIC + ): + raise WrongBackendError(provider.backend, is_mistral_api) + + except ValueError: + pass + return self + + @field_validator("workdir", mode="before") + @classmethod + def _expand_workdir(cls, v: Any) -> Path | None: + if v is None or (isinstance(v, str) and not v.strip()): + return None + + if isinstance(v, str): + v = Path(v).expanduser().resolve() + elif isinstance(v, Path): + v = v.expanduser().resolve() + if not v.is_dir(): + raise ValueError( + f"Tried to set {v} as working directory, path doesn't exist" + ) + return v + + @field_validator("tools", mode="before") + @classmethod + def _normalize_tool_configs(cls, v: Any) -> dict[str, BaseToolConfig]: + if not isinstance(v, dict): + return {} + + normalized: dict[str, BaseToolConfig] = {} + for tool_name, tool_config in v.items(): + if isinstance(tool_config, BaseToolConfig): + normalized[tool_name] = 
tool_config + elif isinstance(tool_config, dict): + normalized[tool_name] = BaseToolConfig.model_validate(tool_config) + else: + normalized[tool_name] = BaseToolConfig() + + return normalized + + @model_validator(mode="after") + def _validate_model_uniqueness(self) -> VibeConfig: + seen_aliases: set[str] = set() + for model in self.models: + if model.alias in seen_aliases: + raise ValueError( + f"Duplicate model alias found: '{model.alias}'. Aliases must be unique." + ) + seen_aliases.add(model.alias) + return self + + @model_validator(mode="after") + def _check_system_prompt(self) -> VibeConfig: + _ = self.system_prompt + return self + + @classmethod + def save_updates(cls, updates: dict[str, Any]) -> None: + CONFIG_DIR.mkdir(parents=True, exist_ok=True) + current_config = TomlFileSettingsSource(cls).toml_data + + def deep_merge(target: dict, source: dict) -> None: + for key, value in source.items(): + if ( + key in target + and isinstance(target.get(key), dict) + and isinstance(value, dict) + ): + deep_merge(target[key], value) + elif ( + key in target + and isinstance(target.get(key), list) + and isinstance(value, list) + ): + if key in {"providers", "models"}: + target[key] = value + else: + target[key] = list(set(value + target[key])) + else: + target[key] = value + + deep_merge(current_config, updates) + cls.dump_config( + to_jsonable_python(current_config, exclude_none=True, fallback=str) + ) + + @classmethod + def dump_config(cls, config: dict[str, Any]) -> None: + with CONFIG_FILE.open("wb") as f: + tomli_w.dump(config, f) + + @classmethod + def _get_agent_config(cls, agent: str | None) -> dict[str, Any] | None: + if agent is None: + return None + + agent_config_path = (AGENT_DIR / agent).with_suffix(".toml") + try: + return tomllib.load(agent_config_path.open("rb")) + except FileNotFoundError: + raise ValueError( + f"Config '{agent}.toml' for agent not found in {AGENT_DIR}" + ) + + @classmethod + def _migrate(cls) -> None: + pass + + @classmethod + def 
load(cls, agent: str | None = None, **overrides: Any) -> VibeConfig: + cls._migrate() + agent_config = cls._get_agent_config(agent) + init_data = {**(agent_config or {}), **overrides} + return cls(**init_data) + + @classmethod + def create_default(cls) -> dict[str, Any]: + try: + config = cls() + except MissingAPIKeyError: + config = cls.model_construct() + + config_dict = config.model_dump(mode="json", exclude_none=True) + + from vibe.core.tools.manager import ToolManager + + tool_defaults = ToolManager.discover_tool_defaults() + if tool_defaults: + config_dict["tools"] = tool_defaults + + return config_dict diff --git a/vibe/core/interaction_logger.py b/vibe/core/interaction_logger.py new file mode 100644 index 0000000..c98c99d --- /dev/null +++ b/vibe/core/interaction_logger.py @@ -0,0 +1,245 @@ +from __future__ import annotations + +from datetime import datetime +import getpass +import json +from pathlib import Path +import subprocess +from typing import TYPE_CHECKING, Any + +import aiofiles + +from vibe.core.llm.format import get_active_tool_classes +from vibe.core.types import AgentStats, LLMMessage, SessionInfo, SessionMetadata +from vibe.core.utils import is_windows + +if TYPE_CHECKING: + from vibe.core.config import SessionLoggingConfig, VibeConfig + from vibe.core.tools.manager import ToolManager + + +class InteractionLogger: + def __init__( + self, + session_config: SessionLoggingConfig, + session_id: str, + auto_approve: bool = False, + workdir: Path | None = None, + ) -> None: + if workdir is None: + workdir = Path.cwd() + self.session_config = session_config + self.enabled = session_config.enabled + self.auto_approve = auto_approve + self.workdir = workdir + + if not self.enabled: + self.save_dir: Path | None = None + self.session_prefix: str | None = None + self.session_id: str = "disabled" + self.session_start_time: str = "N/A" + self.filepath: Path | None = None + self.session_metadata: SessionMetadata | None = None + return + + self.save_dir = 
Path(session_config.save_dir) + self.session_prefix = session_config.session_prefix + self.session_id = session_id + self.session_start_time = datetime.now().isoformat() + + self.save_dir.mkdir(parents=True, exist_ok=True) + self.filepath = self._get_save_filepath() + self.session_metadata = self._initialize_session_metadata() + + def _get_save_filepath(self) -> Path: + if self.save_dir is None or self.session_prefix is None: + raise RuntimeError("Cannot get filepath when logging is disabled") + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"{self.session_prefix}_{timestamp}_{self.session_id[:8]}.json" + return self.save_dir / filename + + def _get_git_commit(self) -> str | None: + try: + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + cwd=self.workdir, + stdin=subprocess.DEVNULL if is_windows() else None, + text=True, + timeout=5.0, + ) + if result.returncode == 0 and result.stdout: + return result.stdout.strip() + except (FileNotFoundError, OSError, subprocess.TimeoutExpired): + pass + return None + + def _get_git_branch(self) -> str | None: + try: + result = subprocess.run( + ["git", "rev-parse", "--abbrev-ref", "HEAD"], + capture_output=True, + cwd=self.workdir, + stdin=subprocess.DEVNULL if is_windows() else None, + text=True, + timeout=5.0, + ) + if result.returncode == 0 and result.stdout: + return result.stdout.strip() + except (FileNotFoundError, OSError, subprocess.TimeoutExpired): + pass + return None + + def _get_username(self) -> str: + try: + return getpass.getuser() + except Exception: + return "unknown" + + def _initialize_session_metadata(self) -> SessionMetadata: + git_commit = self._get_git_commit() + git_branch = self._get_git_branch() + user_name = self._get_username() + + return SessionMetadata( + session_id=self.session_id, + start_time=self.session_start_time, + end_time=None, + git_commit=git_commit, + git_branch=git_branch, + auto_approve=self.auto_approve, + username=user_name, + 
environment={"working_directory": str(self.workdir)}, + ) + + async def save_interaction( + self, + messages: list[LLMMessage], + stats: AgentStats, + config: VibeConfig, + tool_manager: ToolManager, + ) -> str | None: + if not self.enabled or self.filepath is None: + return None + + if self.session_metadata is None: + return None + + active_tools = get_active_tool_classes(tool_manager, config) + + tools_available = [ + { + "type": "function", + "function": { + "name": tool_class.get_name(), + "description": tool_class.description, + "parameters": tool_class.get_parameters(), + }, + } + for tool_class in active_tools + ] + + interaction_data = { + "metadata": { + **self.session_metadata.model_dump(), + "end_time": datetime.now().isoformat(), + "stats": stats.model_dump(), + "total_messages": len(messages), + "tools_available": tools_available, + "agent_config": config.model_dump(mode="json"), + }, + "messages": [m.model_dump(exclude_none=True) for m in messages], + } + + try: + json_content = json.dumps(interaction_data, indent=2, ensure_ascii=False) + + async with aiofiles.open(self.filepath, "w", encoding="utf-8") as f: + await f.write(json_content) + + return str(self.filepath) + except Exception: + return None + + def reset_session(self, session_id: str) -> None: + if not self.enabled: + return + + self.session_id = session_id + self.session_start_time = datetime.now().isoformat() + self.filepath = self._get_save_filepath() + self.session_metadata = self._initialize_session_metadata() + + def get_session_info( + self, messages: list[dict[str, Any]], stats: AgentStats + ) -> SessionInfo: + if not self.enabled or self.save_dir is None: + return SessionInfo( + session_id="disabled", + start_time="N/A", + message_count=len(messages), + stats=stats, + save_dir="N/A", + ) + + return SessionInfo( + session_id=self.session_id, + start_time=self.session_start_time, + message_count=len(messages), + stats=stats, + save_dir=str(self.save_dir), + ) + + @staticmethod + def 
find_latest_session(config: SessionLoggingConfig) -> Path | None: + save_dir = Path(config.save_dir) + if not save_dir.exists(): + return None + + pattern = f"{config.session_prefix}_*.json" + session_files = list(save_dir.glob(pattern)) + + if not session_files: + return None + + return max(session_files, key=lambda p: p.stat().st_mtime) + + @staticmethod + def find_session_by_id( + session_id: str, config: SessionLoggingConfig + ) -> Path | None: + save_dir = Path(config.save_dir) + if not save_dir.exists(): + return None + + # If it's a full UUID, extract the short form (first 8 chars) + short_id = session_id.split("-")[0] if "-" in session_id else session_id + + # Try exact match first, then partial + patterns = [ + f"{config.session_prefix}_*_{short_id}.json", # Exact short UUID + f"{config.session_prefix}_*_{short_id}*.json", # Partial UUID + ] + + for pattern in patterns: + matches = list(save_dir.glob(pattern)) + if matches: + return ( + max(matches, key=lambda p: p.stat().st_mtime) + if len(matches) > 1 + else matches[0] + ) + + return None + + @staticmethod + def load_session(filepath: Path) -> tuple[list[LLMMessage], dict[str, Any]]: + with filepath.open("r", encoding="utf-8") as f: + content = f.read() + + data = json.loads(content) + messages = [LLMMessage.model_validate(msg) for msg in data.get("messages", [])] + metadata = data.get("metadata", {}) + + return messages, metadata diff --git a/vibe/core/llm/__init__.py b/vibe/core/llm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/core/llm/backend/__init__.py b/vibe/core/llm/backend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/vibe/core/llm/backend/factory.py b/vibe/core/llm/backend/factory.py new file mode 100644 index 0000000..d3bb919 --- /dev/null +++ b/vibe/core/llm/backend/factory.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +from vibe.core.config import Backend +from vibe.core.llm.backend.generic import GenericBackend +from 
vibe.core.llm.backend.mistral import MistralBackend + +BACKEND_FACTORY = {Backend.MISTRAL: MistralBackend, Backend.GENERIC: GenericBackend} diff --git a/vibe/core/llm/backend/generic.py b/vibe/core/llm/backend/generic.py new file mode 100644 index 0000000..d4ae12d --- /dev/null +++ b/vibe/core/llm/backend/generic.py @@ -0,0 +1,409 @@ +from __future__ import annotations + +from collections.abc import AsyncGenerator, Callable +import json +import os +import types +from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, Protocol, TypeVar + +import httpx + +from vibe.core.llm.exceptions import BackendErrorBuilder +from vibe.core.types import ( + AvailableTool, + LLMChunk, + LLMMessage, + LLMUsage, + Role, + StrToolChoice, +) +from vibe.core.utils import async_generator_retry, async_retry + +if TYPE_CHECKING: + from vibe.core.config import ModelConfig, ProviderConfig + + +class PreparedRequest(NamedTuple): + endpoint: str + headers: dict[str, str] + body: bytes + + +class APIAdapter(Protocol): + endpoint: ClassVar[str] + + def prepare_request( + self, + *, + model_name: str, + messages: list[LLMMessage], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + enable_streaming: bool, + provider: ProviderConfig, + api_key: str | None = None, + ) -> PreparedRequest: ... + + def parse_response(self, data: dict[str, Any]) -> LLMChunk: ... 
+ + +BACKEND_ADAPTERS: dict[str, APIAdapter] = {} + +T = TypeVar("T", bound=APIAdapter) + + +def register_adapter( + adapters: dict[str, APIAdapter], name: str +) -> Callable[[type[T]], type[T]]: + + def decorator(cls: type[T]) -> type[T]: + adapters[name] = cls() + return cls + + return decorator + + +@register_adapter(BACKEND_ADAPTERS, "openai") +class OpenAIAdapter(APIAdapter): + endpoint: ClassVar[str] = "/chat/completions" + + def build_payload( + self, + model_name: str, + converted_messages: list[dict[str, Any]], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + ) -> dict[str, Any]: + payload = { + "model": model_name, + "messages": converted_messages, + "temperature": temperature, + } + + if tools: + payload["tools"] = [tool.model_dump(exclude_none=True) for tool in tools] + if tool_choice: + payload["tool_choice"] = ( + tool_choice + if isinstance(tool_choice, str) + else tool_choice.model_dump() + ) + if max_tokens is not None: + payload["max_tokens"] = max_tokens + + return payload + + def build_headers(self, api_key: str | None = None) -> dict[str, str]: + headers = {"Content-Type": "application/json"} + if api_key: + headers["Authorization"] = f"Bearer {api_key}" + return headers + + def prepare_request( + self, + *, + model_name: str, + messages: list[LLMMessage], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + enable_streaming: bool, + provider: ProviderConfig, + api_key: str | None = None, + ) -> PreparedRequest: + converted_messages = [msg.model_dump(exclude_none=True) for msg in messages] + + payload = self.build_payload( + model_name, converted_messages, temperature, tools, max_tokens, tool_choice + ) + + headers = self.build_headers(api_key) + + body = json.dumps(payload).encode("utf-8") + + return PreparedRequest(self.endpoint, headers, body) + + def 
parse_response(self, data: dict[str, Any]) -> LLMChunk: + if data.get("choices"): + if "message" in data["choices"][0]: + message = LLMMessage.model_validate(data["choices"][0]["message"]) + elif "delta" in data["choices"][0]: + message = LLMMessage.model_validate(data["choices"][0]["delta"]) + else: + raise ValueError("Invalid response data") + finish_reason = data["choices"][0]["finish_reason"] + + elif "message" in data: + message = LLMMessage.model_validate(data["message"]) + finish_reason = data["finish_reason"] + elif "delta" in data: + message = LLMMessage.model_validate(data["delta"]) + finish_reason = None + else: + message = LLMMessage(role=Role.assistant, content="") + finish_reason = None + + usage_data = data.get("usage") or {} + usage = LLMUsage( + prompt_tokens=usage_data.get("prompt_tokens", 0), + completion_tokens=usage_data.get("completion_tokens", 0), + ) + + return LLMChunk(message=message, usage=usage, finish_reason=finish_reason) + + +class GenericBackend: + def __init__( + self, + *, + client: httpx.AsyncClient | None = None, + provider: ProviderConfig, + timeout: float = 720.0, + ) -> None: + """Initialize the backend. + + Args: + client: Optional httpx client to use. If not provided, one will be created. 
+ """ + self._client = client + self._owns_client = client is None + self._provider = provider + self._timeout = timeout + + async def __aenter__(self) -> GenericBackend: + if self._client is None: + self._client = httpx.AsyncClient( + timeout=httpx.Timeout(self._timeout), + limits=httpx.Limits(max_keepalive_connections=5, max_connections=10), + ) + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> None: + if self._owns_client and self._client: + await self._client.aclose() + self._client = None + + def _get_client(self) -> httpx.AsyncClient: + if self._client is None: + self._client = httpx.AsyncClient( + timeout=httpx.Timeout(self._timeout), + limits=httpx.Limits(max_keepalive_connections=5, max_connections=10), + ) + self._owns_client = True + return self._client + + async def complete( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float = 0.2, + tools: list[AvailableTool] | None = None, + max_tokens: int | None = None, + tool_choice: StrToolChoice | AvailableTool | None = None, + extra_headers: dict[str, str] | None = None, + ) -> LLMChunk: + api_key = ( + os.getenv(self._provider.api_key_env_var) + if self._provider.api_key_env_var + else None + ) + + api_style = getattr(self._provider, "api_style", "openai") + adapter = BACKEND_ADAPTERS[api_style] + + endpoint, headers, body = adapter.prepare_request( + model_name=model.name, + messages=messages, + temperature=temperature, + tools=tools, + max_tokens=max_tokens, + tool_choice=tool_choice, + enable_streaming=False, + provider=self._provider, + api_key=api_key, + ) + + if extra_headers: + headers.update(extra_headers) + + url = f"{self._provider.api_base}{endpoint}" + + try: + res_data, _ = await self._make_request(url, body, headers) + return adapter.parse_response(res_data) + + except httpx.HTTPStatusError as e: + raise BackendErrorBuilder.build_http_error( + 
provider=self._provider.name, + endpoint=url, + response=e.response, + headers=dict(e.response.headers.items()), + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + except httpx.RequestError as e: + raise BackendErrorBuilder.build_request_error( + provider=self._provider.name, + endpoint=url, + error=e, + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + + async def complete_streaming( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float = 0.2, + tools: list[AvailableTool] | None = None, + max_tokens: int | None = None, + tool_choice: StrToolChoice | AvailableTool | None = None, + extra_headers: dict[str, str] | None = None, + ) -> AsyncGenerator[LLMChunk, None]: + api_key = ( + os.getenv(self._provider.api_key_env_var) + if self._provider.api_key_env_var + else None + ) + + api_style = getattr(self._provider, "api_style", "openai") + adapter = BACKEND_ADAPTERS[api_style] + + endpoint, headers, body = adapter.prepare_request( + model_name=model.name, + messages=messages, + temperature=temperature, + tools=tools, + max_tokens=max_tokens, + tool_choice=tool_choice, + enable_streaming=True, + provider=self._provider, + api_key=api_key, + ) + + if extra_headers: + headers.update(extra_headers) + + url = f"{self._provider.api_base}{endpoint}" + + try: + async for res_data in self._make_streaming_request(url, body, headers): + yield adapter.parse_response(res_data) + + except httpx.HTTPStatusError as e: + raise BackendErrorBuilder.build_http_error( + provider=self._provider.name, + endpoint=url, + response=e.response, + headers=dict(e.response.headers.items()), + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + except httpx.RequestError as e: + raise BackendErrorBuilder.build_request_error( + 
provider=self._provider.name, + endpoint=url, + error=e, + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + + class HTTPResponse(NamedTuple): + data: dict[str, Any] + headers: dict[str, str] + + @async_retry(tries=3) + async def _make_request( + self, url: str, data: bytes, headers: dict[str, str] + ) -> HTTPResponse: + client = self._get_client() + response = await client.post(url, content=data, headers=headers) + response.raise_for_status() + + response_headers = dict(response.headers.items()) + response_body = response.json() + return self.HTTPResponse(response_body, response_headers) + + @async_generator_retry(tries=3) + async def _make_streaming_request( + self, url: str, data: bytes, headers: dict[str, str] + ) -> AsyncGenerator[dict[str, Any]]: + client = self._get_client() + async with client.stream( + method="POST", url=url, content=data, headers=headers + ) as response: + response.raise_for_status() + async for line in response.aiter_lines(): + if line.strip() == "": + continue + + DELIM_CHAR = ":" + assert f"{DELIM_CHAR} " in line, "line should look like `key: value`" + delim_index = line.find(DELIM_CHAR) + key = line[0:delim_index] + value = line[delim_index + 2 :] + + if key != "data": + # This might be the case with openrouter, so we just ignore it + continue + if value == "[DONE]": + return + yield json.loads(value.strip()) + + async def count_tokens( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float = 0.0, + tools: list[AvailableTool] | None = None, + tool_choice: StrToolChoice | AvailableTool | None = None, + extra_headers: dict[str, str] | None = None, + ) -> int: + probe_messages = list(messages) + if not probe_messages or probe_messages[-1].role != Role.user: + probe_messages.append(LLMMessage(role=Role.user, content="")) + + result = await self.complete( + model=model, + messages=probe_messages, + temperature=temperature, + 
tools=tools, + max_tokens=16, # Minimal amount for openrouter with openai models + tool_choice=tool_choice, + extra_headers=extra_headers, + ) + assert result.usage is not None, ( + "Usage should be present in non-streaming completions" + ) + + return result.usage.prompt_tokens + + async def close(self) -> None: + if self._owns_client and self._client: + await self._client.aclose() + self._client = None diff --git a/vibe/core/llm/backend/mistral.py b/vibe/core/llm/backend/mistral.py new file mode 100644 index 0000000..683d336 --- /dev/null +++ b/vibe/core/llm/backend/mistral.py @@ -0,0 +1,332 @@ +from __future__ import annotations + +from collections.abc import AsyncGenerator +import json +import os +import re +import types +from typing import TYPE_CHECKING, cast + +import httpx +import mistralai + +from vibe.core.llm.exceptions import BackendErrorBuilder +from vibe.core.types import ( + AvailableTool, + Content, + FunctionCall, + LLMChunk, + LLMMessage, + LLMUsage, + Role, + StrToolChoice, + ToolCall, +) + +if TYPE_CHECKING: + from vibe.core.config import ModelConfig, ProviderConfig + + +class MistralMapper: + def prepare_message(self, msg: LLMMessage) -> mistralai.Messages: + match msg.role: + case Role.system: + return mistralai.SystemMessage(role="system", content=msg.content or "") + case Role.user: + return mistralai.UserMessage(role="user", content=msg.content) + case Role.assistant: + return mistralai.AssistantMessage( + role="assistant", + content=msg.content, + tool_calls=[ + mistralai.ToolCall( + function=mistralai.FunctionCall( + name=tc.function.name or "", + arguments=tc.function.arguments or "", + ), + id=tc.id, + type=tc.type, + index=tc.index, + ) + for tc in msg.tool_calls or [] + ], + ) + case Role.tool: + return mistralai.ToolMessage( + role="tool", + content=msg.content, + tool_call_id=msg.tool_call_id, + name=msg.name, + ) + + def prepare_tool(self, tool: AvailableTool) -> mistralai.Tool: + return mistralai.Tool( + type="function", + 
function=mistralai.Function( + name=tool.function.name, + description=tool.function.description, + parameters=tool.function.parameters, + ), + ) + + def prepare_tool_choice( + self, tool_choice: StrToolChoice | AvailableTool + ) -> mistralai.ChatCompletionStreamRequestToolChoice: + if isinstance(tool_choice, str): + return cast(mistralai.ToolChoiceEnum, tool_choice) + + return mistralai.ToolChoice( + type="function", + function=mistralai.FunctionName(name=tool_choice.function.name), + ) + + def parse_content(self, content: mistralai.AssistantMessageContent) -> Content: + if isinstance(content, str): + return content + + concat_content = "" + for chunk in content: + if isinstance(chunk, mistralai.FileChunk): + continue + match chunk.type: + case "text": + concat_content += chunk.text + case _: + pass + return concat_content + + def parse_tool_calls(self, tool_calls: list[mistralai.ToolCall]) -> list[ToolCall]: + return [ + ToolCall( + id=tool_call.id, + function=FunctionCall( + name=tool_call.function.name, + arguments=tool_call.function.arguments + if isinstance(tool_call.function.arguments, str) + else json.dumps(tool_call.function.arguments), + ), + index=tool_call.index, + ) + for tool_call in tool_calls + ] + + +class MistralBackend: + def __init__(self, provider: ProviderConfig, timeout: float = 720.0) -> None: + self._client: mistralai.Mistral | None = None + self._provider = provider + self._mapper = MistralMapper() + self._api_key = ( + os.getenv(self._provider.api_key_env_var) + if self._provider.api_key_env_var + else None + ) + + # Mistral SDK takes server URL without api version as input + url_pattern = r"(https?://[^/]+)(/v.*)" + match = re.match(url_pattern, self._provider.api_base) + if not match: + raise ValueError( + f"Invalid API base URL: {self._provider.api_base}. 
" + "Expected format: /v" + ) + self._server_url = match.group(1) + self._timeout = timeout + + async def __aenter__(self) -> MistralBackend: + self._client = mistralai.Mistral( + api_key=self._api_key, + server_url=self._server_url, + timeout_ms=int(self._timeout * 1000), + ) + await self._client.__aenter__() + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> None: + if self._client is not None: + await self._client.__aexit__( + exc_type=exc_type, exc_val=exc_val, exc_tb=exc_tb + ) + + def _get_client(self) -> mistralai.Mistral: + if self._client is None: + self._client = mistralai.Mistral( + api_key=self._api_key, server_url=self._server_url + ) + return self._client + + async def complete( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + extra_headers: dict[str, str] | None, + ) -> LLMChunk: + try: + response = await self._get_client().chat.complete_async( + model=model.name, + messages=[self._mapper.prepare_message(msg) for msg in messages], + temperature=temperature, + tools=[self._mapper.prepare_tool(tool) for tool in tools] + if tools + else None, + max_tokens=max_tokens, + tool_choice=self._mapper.prepare_tool_choice(tool_choice) + if tool_choice + else None, + http_headers=extra_headers, + stream=False, + ) + + return LLMChunk( + message=LLMMessage( + role=Role.assistant, + content=self._mapper.parse_content( + response.choices[0].message.content + ) + if response.choices[0].message.content + else "", + tool_calls=self._mapper.parse_tool_calls( + response.choices[0].message.tool_calls + ) + if response.choices[0].message.tool_calls + else None, + ), + usage=LLMUsage( + prompt_tokens=response.usage.prompt_tokens or 0, + completion_tokens=response.usage.completion_tokens or 0, + ), + 
finish_reason=response.choices[0].finish_reason, + ) + + except mistralai.SDKError as e: + raise BackendErrorBuilder.build_http_error( + provider=self._provider.name, + endpoint=self._server_url, + response=e.raw_response, + headers=dict(e.raw_response.headers.items()), + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + except httpx.RequestError as e: + raise BackendErrorBuilder.build_request_error( + provider=self._provider.name, + endpoint=self._server_url, + error=e, + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + + async def complete_streaming( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + extra_headers: dict[str, str] | None, + ) -> AsyncGenerator[LLMChunk, None]: + try: + async for chunk in await self._get_client().chat.stream_async( + model=model.name, + messages=[self._mapper.prepare_message(msg) for msg in messages], + temperature=temperature, + tools=[self._mapper.prepare_tool(tool) for tool in tools] + if tools + else None, + max_tokens=max_tokens, + tool_choice=self._mapper.prepare_tool_choice(tool_choice) + if tool_choice + else None, + http_headers=extra_headers, + ): + yield LLMChunk( + message=LLMMessage( + role=Role.assistant, + content=self._mapper.parse_content( + chunk.data.choices[0].delta.content + ) + if chunk.data.choices[0].delta.content + else "", + tool_calls=self._mapper.parse_tool_calls( + chunk.data.choices[0].delta.tool_calls + ) + if chunk.data.choices[0].delta.tool_calls + else None, + ), + usage=LLMUsage( + prompt_tokens=chunk.data.usage.prompt_tokens or 0 + if chunk.data.usage + else 0, + completion_tokens=chunk.data.usage.completion_tokens or 0 + if chunk.data.usage + else 0, + ), + 
finish_reason=chunk.data.choices[0].finish_reason, + ) + + except mistralai.SDKError as e: + raise BackendErrorBuilder.build_http_error( + provider=self._provider.name, + endpoint=self._server_url, + response=e.raw_response, + headers=dict(e.raw_response.headers.items()), + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + except httpx.RequestError as e: + raise BackendErrorBuilder.build_request_error( + provider=self._provider.name, + endpoint=self._server_url, + error=e, + model=model.name, + messages=messages, + temperature=temperature, + has_tools=bool(tools), + tool_choice=tool_choice, + ) from e + + async def count_tokens( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float = 0.0, + tools: list[AvailableTool] | None = None, + tool_choice: StrToolChoice | AvailableTool | None = None, + extra_headers: dict[str, str] | None = None, + ) -> int: + result = await self.complete( + model=model, + messages=messages, + temperature=temperature, + tools=tools, + max_tokens=1, + tool_choice=tool_choice, + extra_headers=extra_headers, + ) + assert result.usage is not None, ( + "Usage should be present in non-streaming completions" + ) + + return result.usage.prompt_tokens diff --git a/vibe/core/llm/exceptions.py b/vibe/core/llm/exceptions.py new file mode 100644 index 0000000..674a601 --- /dev/null +++ b/vibe/core/llm/exceptions.py @@ -0,0 +1,195 @@ +from __future__ import annotations + +from collections.abc import Mapping +from http import HTTPStatus +import json +from typing import Any + +import httpx +from pydantic import BaseModel, ConfigDict, ValidationError + +from vibe.core.types import AvailableTool, LLMMessage, StrToolChoice + + +class ErrorDetail(BaseModel): + model_config = ConfigDict(extra="ignore") + message: str | None = None + + +class PayloadSummary(BaseModel): + model: str + message_count: int + approx_chars: int + temperature: float + 
has_tools: bool + tool_choice: StrToolChoice | AvailableTool | None + + +class BackendError(RuntimeError): + def __init__( + self, + *, + provider: str, + endpoint: str, + status: int | None, + reason: str | None, + headers: Mapping[str, str] | None, + body_text: str | None, + parsed_error: str | None, + model: str, + payload_summary: PayloadSummary, + ) -> None: + self.provider = provider + self.endpoint = endpoint + self.status = status + self.reason = reason + self.headers = {k.lower(): v for k, v in (headers or {}).items()} + self.body_text = body_text or "" + self.parsed_error = parsed_error + self.model = model + self.payload_summary = payload_summary + super().__init__(self._fmt()) + + def _fmt(self) -> str: + if self.status == HTTPStatus.UNAUTHORIZED: + return "Invalid API key. Please check your API key and try again." + + if self.status == HTTPStatus.TOO_MANY_REQUESTS: + return "Rate limit exceeded. Please wait a moment before trying again." + + rid = self.headers.get("x-request-id") or self.headers.get("request-id") + status_label = ( + f"{self.status} {HTTPStatus(self.status).phrase}" if self.status else "N/A" + ) + parts = [ + f"LLM backend error [{self.provider}]", + f" status: {status_label}", + f" reason: {self.reason or 'N/A'}", + f" request_id: {rid or 'N/A'}", + f" endpoint: {self.endpoint}", + f" model: {self.model}", + f" provider_message: {self.parsed_error or 'N/A'}", + f" body_excerpt: {self._excerpt(self.body_text)}", + f" payload_summary: {self.payload_summary.model_dump_json(exclude_none=True)}", + ] + return "\n".join(parts) + + @staticmethod + def _excerpt(s: str, *, n: int = 400) -> str: + s = s.strip().replace("\n", " ") + return s[:n] + ("…" if len(s) > n else "") + + +class ErrorResponse(BaseModel): + model_config = ConfigDict(extra="ignore") + + error: ErrorDetail | dict[str, Any] | None = None + message: str | None = None + detail: str | None = None + + @property + def primary_message(self) -> str | None: + if e := self.error: + 
match e: + case {"message": str(m)}: + return m + case {"type": str(t)}: + return f"Error: {t}" + case ErrorDetail(message=str(m)): + return m + if m := self.message: + return m + if d := self.detail: + return d + return None + + +class BackendErrorBuilder: + @classmethod + def build_http_error( + cls, + *, + provider: str, + endpoint: str, + response: httpx.Response, + headers: Mapping[str, str] | None, + model: str, + messages: list[LLMMessage], + temperature: float, + has_tools: bool, + tool_choice: StrToolChoice | AvailableTool | None, + ) -> BackendError: + try: + body_text = response.text + except Exception: # On streaming responses, we can't read the body + body_text = None + + return BackendError( + provider=provider, + endpoint=endpoint, + status=response.status_code, + reason=response.reason_phrase, + headers=headers or {}, + body_text=body_text, + parsed_error=cls._parse_provider_error(body_text), + model=model, + payload_summary=cls._payload_summary( + model, messages, temperature, has_tools, tool_choice + ), + ) + + @classmethod + def build_request_error( + cls, + *, + provider: str, + endpoint: str, + error: httpx.RequestError, + model: str, + messages: list[LLMMessage], + temperature: float, + has_tools: bool, + tool_choice: StrToolChoice | AvailableTool | None, + ) -> BackendError: + return BackendError( + provider=provider, + endpoint=endpoint, + status=None, + reason=str(error) or repr(error), + headers={}, + body_text=None, + parsed_error="Network error", + model=model, + payload_summary=cls._payload_summary( + model, messages, temperature, has_tools, tool_choice + ), + ) + + @staticmethod + def _parse_provider_error(body_text: str | None) -> str | None: + if not body_text: + return None + try: + data = json.loads(body_text) + error_model = ErrorResponse.model_validate(data) + return error_model.primary_message + except (json.JSONDecodeError, ValidationError): + return None + + @staticmethod + def _payload_summary( + model_name: str, + messages: 
list[LLMMessage], + temperature: float, + has_tools: bool, + tool_choice: StrToolChoice | AvailableTool | None, + ) -> PayloadSummary: + total_chars = sum(len(m.content or "") for m in messages) + return PayloadSummary( + model=model_name, + message_count=len(messages), + approx_chars=total_chars, + temperature=temperature, + has_tools=has_tools, + tool_choice=tool_choice, + ) diff --git a/vibe/core/llm/format.py b/vibe/core/llm/format.py new file mode 100644 index 0000000..20c546f --- /dev/null +++ b/vibe/core/llm/format.py @@ -0,0 +1,270 @@ +from __future__ import annotations + +from fnmatch import fnmatch +from functools import lru_cache +import json +import re +from typing import TYPE_CHECKING, Any + +from pydantic import BaseModel, ConfigDict, Field, ValidationError + +from vibe.core.tools.base import BaseTool +from vibe.core.types import ( + AvailableFunction, + AvailableTool, + LLMMessage, + Role, + StrToolChoice, +) + +if TYPE_CHECKING: + from vibe.core.config import VibeConfig + from vibe.core.tools.manager import ToolManager + + +def _is_regex_hint(pattern: str) -> bool: + """Heuristically detect whether a pattern looks like a regex. + + - Explicit regex: starts with 're:' + - Heuristic regex: contains common regex metachars or '.*' + """ + if pattern.startswith("re:"): + return True + return bool(re.search(r"[().+|^$]", pattern) or ".*" in pattern) + + +@lru_cache(maxsize=256) +def _compile_icase(expr: str) -> re.Pattern | None: + try: + return re.compile(expr, re.IGNORECASE) + except re.error: + return None + + +def _regex_match_icase(expr: str, s: str) -> bool: + rx = _compile_icase(expr) + return rx is not None and rx.fullmatch(s) is not None + + +def _name_matches(name: str, patterns: list[str]) -> bool: + """Check if a tool name matches any of the provided patterns. 
+ + Supports three forms (case-insensitive): + - Exact names (no wildcards/regex tokens) + - Glob wildcards using fnmatch (e.g., 'serena_*') + - Regex when prefixed with 're:' + or when the pattern looks regex-y (e.g., 'serena.*') + """ + n = name.lower() + for raw in patterns: + if not (p := (raw or "").strip()): + continue + + match p: + case _ if p.startswith("re:"): + if _regex_match_icase(p.removeprefix("re:"), name): + return True + case _ if _is_regex_hint(p): + if _regex_match_icase(p, name): + return True + case _: + if fnmatch(n, p.lower()): + return True + + return False + + +def get_active_tool_classes( + tool_manager: ToolManager, config: VibeConfig +) -> list[type[BaseTool]]: + """Returns a list of active tool classes based on the configuration. + + Args: + tool_manager: ToolManager instance with discovered tools + config: VibeConfig with enabled_tools/disabled_tools settings + """ + all_tools = list(tool_manager.available_tools().values()) + + if config.enabled_tools: + return [ + tool_class + for tool_class in all_tools + if _name_matches(tool_class.get_name(), config.enabled_tools) + ] + + if config.disabled_tools: + return [ + tool_class + for tool_class in all_tools + if not _name_matches(tool_class.get_name(), config.disabled_tools) + ] + + return all_tools + + +class ParsedToolCall(BaseModel): + model_config = ConfigDict(frozen=True) + tool_name: str + raw_args: dict[str, Any] + call_id: str = "" + + +class ResolvedToolCall(BaseModel): + model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True) + tool_name: str + tool_class: type[BaseTool] + validated_args: BaseModel + call_id: str = "" + + @property + def args_dict(self) -> dict[str, Any]: + return self.validated_args.model_dump() + + +class FailedToolCall(BaseModel): + model_config = ConfigDict(frozen=True) + tool_name: str + call_id: str + error: str + + +class ParsedMessage(BaseModel): + model_config = ConfigDict(frozen=True) + tool_calls: list[ParsedToolCall] + + +class 
class ResolvedMessage(BaseModel):
    """Outcome of resolving parsed tool calls against the active tool set."""

    model_config = ConfigDict(frozen=True)
    tool_calls: list[ResolvedToolCall]
    failed_calls: list[FailedToolCall] = Field(default_factory=list)


class APIToolFormatHandler:
    """Translates between API-native tool calling and vibe's tool system."""

    @property
    def name(self) -> str:
        return "api"

    def get_available_tools(
        self, tool_manager: ToolManager, config: VibeConfig
    ) -> list[AvailableTool]:
        """Expose every active tool as an API tool declaration."""
        declarations: list[AvailableTool] = []
        for tool_class in get_active_tool_classes(tool_manager, config):
            declarations.append(
                AvailableTool(
                    function=AvailableFunction(
                        name=tool_class.get_name(),
                        description=tool_class.description,
                        parameters=tool_class.get_parameters(),
                    )
                )
            )
        return declarations

    def get_tool_choice(self) -> StrToolChoice | AvailableTool:
        """Let the model decide whether to call a tool."""
        return "auto"

    def process_api_response_message(self, message: Any) -> LLMMessage:
        """Normalize a provider response message into an LLMMessage."""
        clean: dict[str, Any] = {"role": message.role, "content": message.content}

        if message.tool_calls:
            converted = []
            for tc in message.tool_calls:
                converted.append(
                    {
                        "id": tc.id,
                        "index": tc.index,
                        "type": "function",
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments,
                        },
                    }
                )
            clean["tool_calls"] = converted

        return LLMMessage.model_validate(clean)

    def parse_message(self, message: LLMMessage) -> ParsedMessage:
        """Extract tool calls from a message, tolerating malformed JSON args."""
        calls: list[ParsedToolCall] = []

        for tc in message.tool_calls or []:
            function_call = tc.function
            if not function_call:
                continue
            try:
                args = json.loads(function_call.arguments or "{}")
            except json.JSONDecodeError:
                # Unparseable arguments degrade to an empty dict; validation
                # will surface missing required fields later.
                args = {}

            calls.append(
                ParsedToolCall(
                    tool_name=function_call.name or "",
                    raw_args=args,
                    call_id=tc.id or "",
                )
            )

        return ParsedMessage(tool_calls=calls)

    def resolve_tool_calls(
        self, parsed: ParsedMessage, tool_manager: ToolManager, config: VibeConfig
    ) -> ResolvedMessage:
        """Match parsed calls to active tool classes and validate arguments."""
        ok: list[ResolvedToolCall] = []
        failed: list[FailedToolCall] = []

        active_tools = {
            cls.get_name(): cls
            for cls in get_active_tool_classes(tool_manager, config)
        }

        for call in parsed.tool_calls:
            tool_class = active_tools.get(call.tool_name)
            if not tool_class:
                failed.append(
                    FailedToolCall(
                        tool_name=call.tool_name,
                        call_id=call.call_id,
                        error=f"Unknown tool '{call.tool_name}'",
                    )
                )
                continue

            # NOTE: relies on BaseTool's private args-model accessor.
            args_model, _ = tool_class._get_tool_args_results()
            try:
                validated = args_model.model_validate(call.raw_args)
            except ValidationError as e:
                failed.append(
                    FailedToolCall(
                        tool_name=call.tool_name,
                        call_id=call.call_id,
                        error=f"Invalid arguments: {e}",
                    )
                )
            else:
                ok.append(
                    ResolvedToolCall(
                        tool_name=call.tool_name,
                        tool_class=tool_class,
                        validated_args=validated,
                        call_id=call.call_id,
                    )
                )

        return ResolvedMessage(tool_calls=ok, failed_calls=failed)

    def create_tool_response_message(
        self, tool_call: ResolvedToolCall, result_text: str
    ) -> LLMMessage:
        """Build the tool-role message carrying a successful result."""
        return LLMMessage(
            role=Role.tool,
            tool_call_id=tool_call.call_id,
            name=tool_call.tool_name,
            content=result_text,
        )

    def create_failed_tool_response_message(
        self, failed: FailedToolCall, error_content: str
    ) -> LLMMessage:
        """Build the tool-role message carrying a failure explanation."""
        return LLMMessage(
            role=Role.tool,
            tool_call_id=failed.call_id,
            name=failed.tool_name,
            content=error_content,
        )
+ """ + + async def __aenter__(self) -> BackendLike: ... + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: types.TracebackType | None, + ) -> None: ... + + async def complete( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + extra_headers: dict[str, str] | None, + ) -> LLMChunk: + """Complete a chat conversation using the specified model and provider. + + Args: + model: Model configuration + messages: List of conversation messages + temperature: Sampling temperature (0.0 to 1.0) + tools: Optional list of available tools + max_tokens: Maximum tokens to generate + tool_choice: How to choose tools (auto, none, or specific tool) + extra_headers: Additional HTTP headers to include + + Returns: + LLMChunk containing the response message and usage information + + Raises: + BackendError: If the API request fails + """ + ... + + # Note: actual implementation should be an async function, + # but we can't make this one async, as it would lead to wrong type inference + # https://stackoverflow.com/a/68911014 + def complete_streaming( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float, + tools: list[AvailableTool] | None, + max_tokens: int | None, + tool_choice: StrToolChoice | AvailableTool | None, + extra_headers: dict[str, str] | None, + ) -> AsyncGenerator[LLMChunk, None]: + """Equivalent of the complete method, but yields LLMEvent objects + instead of a single LLMEvent. 
+ + Args: + model: Model configuration + messages: List of conversation messages + temperature: Sampling temperature (0.0 to 1.0) + tools: Optional list of available tools + max_tokens: Maximum tokens to generate + tool_choice: How to choose tools (auto, none, or specific tool) + extra_headers: Additional HTTP headers to include + + Returns: + AsyncGenerator[LLMEvent, None] yielding LLMEvent objects + + Raises: + BackendError: If the API request fails + """ + ... + + async def count_tokens( + self, + *, + model: ModelConfig, + messages: list[LLMMessage], + temperature: float = 0.0, + tools: list[AvailableTool] | None, + tool_choice: StrToolChoice | AvailableTool | None = None, + extra_headers: dict[str, str] | None, + ) -> int: + """Count the number of tokens in the prompt without generating a real response. + + This is useful for: + - Determining system prompt token count + - Checking context size after compaction + - Pre-flight token validation + + Args: + model: Model configuration + messages: List of messages to count tokens for + temperature: Sampling temperature + tools: Optional list of available tools + tool_choice: How to choose tools + extra_headers: Additional HTTP headers to include + + Returns: + The number of prompt tokens + """ + ... 
diff --git a/vibe/core/middleware.py b/vibe/core/middleware.py new file mode 100644 index 0000000..6b198d0 --- /dev/null +++ b/vibe/core/middleware.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import StrEnum, auto +from typing import TYPE_CHECKING, Any, Protocol + +from vibe.core.utils import VIBE_WARNING_TAG + +if TYPE_CHECKING: + from vibe.core.config import VibeConfig + from vibe.core.types import AgentStats, LLMMessage + + +class MiddlewareAction(StrEnum): + CONTINUE = auto() + STOP = auto() + COMPACT = auto() + INJECT_MESSAGE = auto() + + +class ResetReason(StrEnum): + STOP = auto() + COMPACT = auto() + + +@dataclass +class ConversationContext: + messages: list[LLMMessage] + stats: AgentStats + config: VibeConfig + + +@dataclass +class MiddlewareResult: + action: MiddlewareAction = MiddlewareAction.CONTINUE + message: str | None = None + reason: str | None = None + metadata: dict[str, Any] = field(default_factory=dict) + + +class ConversationMiddleware(Protocol): + async def before_turn(self, context: ConversationContext) -> MiddlewareResult: ... + + async def after_turn(self, context: ConversationContext) -> MiddlewareResult: ... + + def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None: ... 
+ + +class TurnLimitMiddleware: + def __init__(self, max_turns: int) -> None: + self.max_turns = max_turns + + async def before_turn(self, context: ConversationContext) -> MiddlewareResult: + if context.stats.steps - 1 >= self.max_turns: + return MiddlewareResult( + action=MiddlewareAction.STOP, + reason=f"Turn limit of {self.max_turns} reached", + ) + return MiddlewareResult() + + async def after_turn(self, context: ConversationContext) -> MiddlewareResult: + return MiddlewareResult() + + def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None: + pass + + +class PriceLimitMiddleware: + def __init__(self, max_price: float) -> None: + self.max_price = max_price + + async def before_turn(self, context: ConversationContext) -> MiddlewareResult: + if context.stats.session_cost > self.max_price: + return MiddlewareResult( + action=MiddlewareAction.STOP, + reason=f"Price limit exceeded: ${context.stats.session_cost:.4f} > ${self.max_price:.2f}", + ) + return MiddlewareResult() + + async def after_turn(self, context: ConversationContext) -> MiddlewareResult: + return MiddlewareResult() + + def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None: + pass + + +class AutoCompactMiddleware: + def __init__(self, threshold: int) -> None: + self.threshold = threshold + + async def before_turn(self, context: ConversationContext) -> MiddlewareResult: + if context.stats.context_tokens >= self.threshold: + return MiddlewareResult( + action=MiddlewareAction.COMPACT, + metadata={ + "old_tokens": context.stats.context_tokens, + "threshold": self.threshold, + }, + ) + return MiddlewareResult() + + async def after_turn(self, context: ConversationContext) -> MiddlewareResult: + return MiddlewareResult() + + def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None: + pass + + +class ContextWarningMiddleware: + def __init__( + self, threshold_percent: float = 0.5, max_context: int | None = None + ) -> None: + self.threshold_percent = threshold_percent 
        self.max_context = max_context
        # One-shot latch: the warning is injected at most once per session.
        self.has_warned = False

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Inject a usage warning once context crosses threshold_percent of max_context."""
        if self.has_warned:
            return MiddlewareResult()

        max_context = self.max_context
        if max_context is None:
            # No known context window size: nothing to warn about.
            return MiddlewareResult()

        if context.stats.context_tokens >= max_context * self.threshold_percent:
            self.has_warned = True

            percentage_used = (context.stats.context_tokens / max_context) * 100
            # NOTE(review): the warning tag is opened but never closed — confirm
            # downstream consumers expect an unclosed <tag>.
            warning_msg = f"<{VIBE_WARNING_TAG}>You have used {percentage_used:.0f}% of your total context ({context.stats.context_tokens:,}/{max_context:,} tokens)"

            return MiddlewareResult(
                action=MiddlewareAction.INJECT_MESSAGE, message=warning_msg
            )

        return MiddlewareResult()

    async def after_turn(self, context: ConversationContext) -> MiddlewareResult:
        return MiddlewareResult()

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        # Allow the warning to fire again after a reset/compaction.
        self.has_warned = False


class MiddlewarePipeline:
    """Ordered collection of middlewares run before and after each turn."""

    def __init__(self) -> None:
        self.middlewares: list[ConversationMiddleware] = []

    def add(self, middleware: ConversationMiddleware) -> MiddlewarePipeline:
        """Append a middleware; returns self for chaining."""
        self.middlewares.append(middleware)
        return self

    def clear(self) -> None:
        self.middlewares.clear()

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        for mw in self.middlewares:
            mw.reset(reset_reason)

    async def run_before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Run all middlewares; STOP/COMPACT short-circuits, injections are merged."""
        messages_to_inject = []

        for mw in self.middlewares:
            result = await mw.before_turn(context)
            if result.action == MiddlewareAction.INJECT_MESSAGE and result.message:
                messages_to_inject.append(result.message)
            elif result.action in {MiddlewareAction.STOP, MiddlewareAction.COMPACT}:
                # First stop/compact wins; queued injections are discarded.
                return result
        if messages_to_inject:
            combined_message = "\n\n".join(messages_to_inject)
            return MiddlewareResult(
                action=MiddlewareAction.INJECT_MESSAGE, message=combined_message
            )

        return MiddlewareResult()

    async def
run_after_turn(self, context: ConversationContext) -> MiddlewareResult: + messages_to_inject = [] + + for mw in self.middlewares: + result = await mw.after_turn(context) + if result.action == MiddlewareAction.INJECT_MESSAGE and result.message: + messages_to_inject.append(result.message) + elif result.action in {MiddlewareAction.STOP, MiddlewareAction.COMPACT}: + return result + if messages_to_inject: + combined_message = "\n\n".join(messages_to_inject) + return MiddlewareResult( + action=MiddlewareAction.INJECT_MESSAGE, message=combined_message + ) + + return MiddlewareResult() diff --git a/vibe/core/output_formatters.py b/vibe/core/output_formatters.py new file mode 100644 index 0000000..2c26fd7 --- /dev/null +++ b/vibe/core/output_formatters.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +import json +import sys +from typing import TextIO + +from vibe.core.types import AssistantEvent, BaseEvent, LLMMessage, OutputFormat + + +class OutputFormatter(ABC): + def __init__(self, stream: TextIO = sys.stdout) -> None: + self.stream = stream + self._messages: list[LLMMessage] = [] + self._final_response: str | None = None + + @abstractmethod + def on_message_added(self, message: LLMMessage) -> None: + pass + + @abstractmethod + def on_event(self, event: BaseEvent) -> None: + pass + + @abstractmethod + def finalize(self) -> str | None: + """Finalize output and return any final text to be printed. 
+ + Returns: + String to print, or None if formatter handles its own output + """ + pass + + +class TextOutputFormatter(OutputFormatter): + def on_message_added(self, message: LLMMessage) -> None: + self._messages.append(message) + + def on_event(self, event: BaseEvent) -> None: + if isinstance(event, AssistantEvent): + self._final_response = event.content + + def finalize(self) -> str | None: + return self._final_response + + +class JsonOutputFormatter(OutputFormatter): + def on_message_added(self, message: LLMMessage) -> None: + self._messages.append(message) + + def on_event(self, event: BaseEvent) -> None: + pass + + def finalize(self) -> str | None: + messages_data = [msg.model_dump(mode="json") for msg in self._messages] + json.dump(messages_data, self.stream, indent=2) + self.stream.write("\n") + self.stream.flush() + return None + + +class StreamingJsonOutputFormatter(OutputFormatter): + def on_message_added(self, message: LLMMessage) -> None: + json.dump(message.model_dump(mode="json"), self.stream) + self.stream.write("\n") + self.stream.flush() + + def on_event(self, event: BaseEvent) -> None: + pass + + def finalize(self) -> str | None: + return None + + +def create_formatter( + format_type: OutputFormat, stream: TextIO = sys.stdout +) -> OutputFormatter: + formatters = { + OutputFormat.TEXT: TextOutputFormatter, + OutputFormat.JSON: JsonOutputFormatter, + OutputFormat.STREAMING: StreamingJsonOutputFormatter, + } + + formatter_class = formatters.get(format_type, TextOutputFormatter) + return formatter_class(stream) diff --git a/vibe/core/programmatic.py b/vibe/core/programmatic.py new file mode 100644 index 0000000..2dcc22c --- /dev/null +++ b/vibe/core/programmatic.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import asyncio + +from vibe.core.agent import Agent +from vibe.core.config import VibeConfig +from vibe.core.output_formatters import create_formatter +from vibe.core.types import AssistantEvent, LLMMessage, OutputFormat, Role +from 
vibe.core.utils import ConversationLimitException, logger + + +def run_programmatic( + config: VibeConfig, + prompt: str, + max_turns: int | None = None, + max_price: float | None = None, + output_format: OutputFormat = OutputFormat.TEXT, + previous_messages: list[LLMMessage] | None = None, + auto_approve: bool = True, +) -> str | None: + """Run in programmatic mode: execute prompt and return the assistant response. + + Args: + config: Configuration for the Vibe agent + prompt: The user prompt to process + max_turns: Maximum number of assistant turns (LLM calls) to allow + max_price: Maximum cost in dollars before stopping + output_format: Format for the output + previous_messages: Optional messages from a previous session to continue + auto_approve: Whether to automatically approve tool execution + + Returns: + The final assistant response text, or None if no response + """ + formatter = create_formatter(output_format) + + agent = Agent( + config, + auto_approve=auto_approve, + message_observer=formatter.on_message_added, + max_turns=max_turns, + max_price=max_price, + enable_streaming=False, + ) + logger.info("USER: %s", prompt) + + async def _async_run() -> str | None: + if previous_messages: + non_system_messages = [ + msg for msg in previous_messages if not (msg.role == Role.system) + ] + agent.messages.extend(non_system_messages) + logger.info( + "Loaded %d messages from previous session", len(non_system_messages) + ) + + async for event in agent.act(prompt): + formatter.on_event(event) + if isinstance(event, AssistantEvent) and event.stopped_by_middleware: + raise ConversationLimitException(event.content) + + return formatter.finalize() + + return asyncio.run(_async_run()) diff --git a/vibe/core/prompts/__init__.py b/vibe/core/prompts/__init__.py new file mode 100644 index 0000000..a435a6d --- /dev/null +++ b/vibe/core/prompts/__init__.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from enum import StrEnum, auto +from pathlib import Path + +from 
vibe import VIBE_ROOT + +_PROMPTS_DIR = VIBE_ROOT / "core" / "prompts" + + +class Prompt(StrEnum): + @property + def path(self) -> Path: + return (_PROMPTS_DIR / self.value).with_suffix(".md") + + def read(self) -> str: + return self.path.read_text(encoding="utf-8").strip() + + +class SystemPrompt(Prompt): + CLI = auto() + TESTS = auto() + + +class UtilityPrompt(Prompt): + COMPACT = auto() + DANGEROUS_DIRECTORY = auto() + PROJECT_CONTEXT = auto() + + +__all__ = ["SystemPrompt", "UtilityPrompt"] diff --git a/vibe/core/prompts/cli.md b/vibe/core/prompts/cli.md new file mode 100644 index 0000000..99ce2d0 --- /dev/null +++ b/vibe/core/prompts/cli.md @@ -0,0 +1,24 @@ +You are operating as and within Mistral Vibe, a CLI coding-agent built by Mistral AI and powered by default by the Devstral family of models. It wraps Mistral's Devstral models to enable natural language interaction with a local codebase. Use the available tools when helpful. + +You can: + +- Receive user prompts, project context, and files. +- Send responses and emit function calls (e.g., shell commands, code edits). +- Apply patches, run commands, based on user approvals. + +Answer the user's request using the relevant tool(s), if they are available. Check that all the required parameters for each tool call are provided or can reasonably be inferred from context. IF there are no relevant tools or there are missing values for required parameters, ask the user to supply these values; otherwise proceed with the tool calls. If the user provides a specific value for a parameter (for example provided in quotes), make sure to use that value EXACTLY. DO NOT make up values for or ask about optional parameters. Carefully analyze descriptive terms in the request as they may indicate required parameter values that should be included even if not explicitly quoted. + +Always try your hardest to use the tools to answer the user's request. If you can't use the tools, explain why and ask the user for more information. 
Act as an agentic assistant: if a user asks for a long task, break it down and do it step by step.

When you want to commit changes, you will always use the 'git commit' bash command. It will always
be suffixed with a line stating that it was generated by Mistral Vibe with the appropriate co-authoring information.
The format you will always use is the following heredoc.

```bash
git commit -m "

Generated by Mistral Vibe.
Co-Authored-By: Mistral Vibe "
```
diff --git a/vibe/core/prompts/compact.md b/vibe/core/prompts/compact.md new file mode 100644 index 0000000..9e3aaf3 --- /dev/null +++ b/vibe/core/prompts/compact.md @@ -0,0 +1,48 @@
Create a comprehensive summary of our entire conversation that will serve as complete context for continuing this work. Structure your summary to capture both the narrative flow and technical details necessary for seamless continuation.

Your summary must include these sections in order:

## 1. User's Primary Goals and Intent
Capture ALL explicit requests and objectives stated by the user throughout the conversation, preserving their exact priorities and constraints.

## 2. Conversation Timeline and Progress
Chronologically document the key phases of our work:
- Initial requests and how they were addressed
- Major decisions made and their rationale
- Problems encountered and solutions applied
- Current state of the work

## 3. Technical Context and Decisions
- Technologies, frameworks, and tools being used
- Architectural patterns and design decisions made
- Key technical constraints or requirements identified
- Important code patterns or conventions established

## 4. Files and Code Changes
For each file created, modified, or examined:
- Full file path/name
- Purpose and importance of the file
- Specific changes made (with key code snippets where critical)
- Current state of the file

## 5. 
Active Work and Last Actions +CRITICAL: Detail EXACTLY what was being worked on in the most recent exchanges: +- The specific task or problem being addressed +- Last completed action +- Any partial work or mid-implementation state +- Include relevant code snippets from the most recent work + +## 6. Unresolved Issues and Pending Tasks +- Any errors or issues still requiring attention +- Tasks explicitly requested but not yet started +- Decisions waiting for user input + +## 7. Immediate Next Step +State the SPECIFIC next action to take based on: +- The user's most recent request +- The current state of implementation +- Any ongoing work that was interrupted + +Important: Be precise with technical details, file names, and code. The next agent reading this should be able to continue exactly where we left off without asking clarifying questions. Include enough detail that no context is lost, but remain focused on actionable information. + +Respond with ONLY the summary text following this structure - no additional commentary or meta-discussion. diff --git a/vibe/core/prompts/dangerous_directory.md b/vibe/core/prompts/dangerous_directory.md new file mode 100644 index 0000000..93632b5 --- /dev/null +++ b/vibe/core/prompts/dangerous_directory.md @@ -0,0 +1,5 @@ +directoryStructure: Project context scanning has been disabled because {reason}. This prevents permission dialogs and potential system slowdowns. Use the LS tool and other file tools to explore the project structure as needed. + +Absolute path: {abs_path} + +gitStatus: Use git tools to check repository status if needed. diff --git a/vibe/core/prompts/project_context.md b/vibe/core/prompts/project_context.md new file mode 100644 index 0000000..7b2d454 --- /dev/null +++ b/vibe/core/prompts/project_context.md @@ -0,0 +1,8 @@ +directoryStructure: Below is a snapshot of this project's file structure at the start of the conversation. This snapshot will NOT update during the conversation. 
It skips over .gitignore patterns.{large_repo_warning}

{structure}

Absolute path: {abs_path}

gitStatus: This is the git status at the start of the conversation. Note that this status is a snapshot in time, and will not update during the conversation.
{git_status}
diff --git a/vibe/core/prompts/tests.md b/vibe/core/prompts/tests.md new file mode 100644 index 0000000..7fc4c99 --- /dev/null +++ b/vibe/core/prompts/tests.md @@ -0,0 +1 @@
You are Vibe, a super useful programming assistant.
diff --git a/vibe/core/system_prompt.py b/vibe/core/system_prompt.py new file mode 100644 index 0000000..eb4a9b4 --- /dev/null +++ b/vibe/core/system_prompt.py @@ -0,0 +1,404 @@
from __future__ import annotations

from collections.abc import Generator
import fnmatch
import os
from pathlib import Path
import subprocess
import sys
import time
from typing import TYPE_CHECKING

from vibe.core.config import INSTRUCTIONS_FILE, PROJECT_DOC_FILENAMES
from vibe.core.llm.format import get_active_tool_classes
from vibe.core.prompts import UtilityPrompt
from vibe.core.utils import is_dangerous_directory, is_windows

if TYPE_CHECKING:
    from vibe.core.config import ProjectContextConfig, VibeConfig
    from vibe.core.tools.manager import ToolManager


def _load_user_instructions() -> str:
    """Return the user's global instructions file contents, or "" if unreadable.

    NOTE: FileNotFoundError is a subclass of OSError, so the first entry of the
    except tuple is redundant but harmless.
    """
    try:
        return INSTRUCTIONS_FILE.read_text("utf-8", errors="ignore")
    except (FileNotFoundError, OSError):
        return ""


def _load_project_doc(workdir: Path, max_bytes: int) -> str:
    """Return the first readable project doc from PROJECT_DOC_FILENAMES.

    The content is truncated to *max_bytes* characters; "" when none exists.
    """
    for name in PROJECT_DOC_FILENAMES:
        path = workdir / name
        try:
            return path.read_text("utf-8", errors="ignore")[:max_bytes]
        except (FileNotFoundError, OSError):
            continue
    return ""


class ProjectContextProvider:
    """Builds the directory-structure and git-status snapshot used in the system prompt."""

    def __init__(
        self, config: ProjectContextConfig, root_path: str | Path = "."
    ) -> None:
        self.root_path = Path(root_path).resolve()
        self.config = config
        self.gitignore_patterns = self._load_gitignore_patterns()
        # Scan-progress counters used to enforce max_files / timeout budgets.
        self._file_count = 0
        self._start_time = 0.0

    def _load_gitignore_patterns(self) -> list[str]:
        """Return .gitignore patterns (if readable) plus built-in defaults.

        NOTE: patterns are later matched with fnmatch, which approximates but
        does not fully implement gitignore semantics.
        """
        gitignore_path = self.root_path / ".gitignore"
        patterns = []

        if gitignore_path.exists():
            try:
                # Keep non-empty, non-comment lines only.
                patterns.extend(
                    line.strip()
                    for line in gitignore_path.read_text(encoding="utf-8").splitlines()
                    if line.strip() and not line.startswith("#")
                )
            except Exception as e:
                print(f"Warning: Could not read .gitignore: {e}", file=sys.stderr)

        # Common VCS/build/dependency/editor junk excluded from the snapshot
        # regardless of the project's own .gitignore.
        default_patterns = [
            ".git",
            ".git/*",
            "*.pyc",
            "__pycache__",
            "node_modules",
            "node_modules/*",
            ".env",
            ".DS_Store",
            "*.log",
            ".vscode/settings.json",
            ".idea/*",
            "dist",
            "build",
            "target",
            ".next",
            ".nuxt",
            "coverage",
            ".nyc_output",
            "*.egg-info",
            ".pytest_cache",
            ".tox",
            "vendor",
            "third_party",
            "deps",
            "*.min.js",
            "*.min.css",
            "*.bundle.js",
            "*.chunk.js",
            ".cache",
            "tmp",
            "temp",
            "logs",
        ]

        return patterns + default_patterns

    def _is_ignored(self, path: Path) -> bool:
        """True when *path* matches an ignore pattern, or lies outside root/is unreadable."""
        try:
            relative_path = path.relative_to(self.root_path)
            path_str = str(relative_path)

            for pattern in self.gitignore_patterns:
                if pattern.endswith("/"):
                    # Directory-only pattern: match against "<path>/".
                    if path.is_dir() and fnmatch.fnmatch(f"{path_str}/", pattern):
                        return True
                elif fnmatch.fnmatch(path_str, pattern):
                    return True
                # NOTE(review): this wildcard branch appears unreachable — the
                # previous elif already fnmatch-matched every pattern.
                elif "*" in pattern or "?"
 in pattern:
                    if fnmatch.fnmatch(path_str, pattern):
                        return True

            return False
        except (ValueError, OSError):
            # Paths outside root (ValueError) or unreadable entries count as ignored.
            return True

    def _should_stop(self) -> bool:
        """True once the scan exceeds its file-count or wall-clock budget."""
        return (
            self._file_count >= self.config.max_files
            or (time.time() - self._start_time) > self.config.timeout_seconds
        )

    def _build_tree_structure_iterative(self) -> Generator[str]:
        """Yield tree lines for the whole project, resetting the scan budget first.

        NOTE(review): despite the name, this delegates to the recursive
        _process_directory.
        """
        self._start_time = time.time()
        self._file_count = 0

        yield from self._process_directory(self.root_path, "", 0, is_root=True)

    def _process_directory(
        self, path: Path, prefix: str, depth: int, is_root: bool = False
    ) -> Generator[str]:
        """Yield ASCII-tree lines for *path*, recursing up to config.max_depth."""
        if depth > self.config.max_depth or self._should_stop():
            return

        try:
            all_items = list(path.iterdir())
            items = [item for item in all_items if not self._is_ignored(item)]

            # Directories first, then case-insensitive by name.
            items.sort(key=lambda p: (not p.is_dir(), p.name.lower()))

            show_truncation = len(items) > self.config.max_dirs_per_level
            if show_truncation:
                items = items[: self.config.max_dirs_per_level]

            for i, item in enumerate(items):
                if self._should_stop():
                    break

                is_last = i == len(items) - 1 and not show_truncation
                connector = "└── " if is_last else "├── "
                name = f"{item.name}{'/' if item.is_dir() else ''}"

                yield f"{prefix}{connector}{name}"
                self._file_count += 1

                if item.is_dir() and depth < self.config.max_depth:
                    child_prefix = prefix + ("    " if is_last else "│   ")
                    yield from self._process_directory(item, child_prefix, depth + 1)

            if show_truncation and not self._should_stop():
                # NOTE(review): counts every entry not shown (including ignored
                # ones), not only those dropped by the per-level cap — confirm.
                remaining = len(all_items) - len(items)
                yield f"{prefix}└── ... 
({remaining} more items)"

        except (PermissionError, OSError):
            # Unreadable directories are silently skipped from the tree.
            pass

    def get_directory_structure(self) -> str:
        """Render the project tree as text, enforcing char/file/time budgets."""
        lines = []
        header = f"Directory structure of {self.root_path.name} (depth≤{self.config.max_depth}, max {self.config.max_files} items):\n"

        try:
            for line in self._build_tree_structure_iterative():
                lines.append(line)

                # Re-joining every iteration is O(n^2) in total output size,
                # but bounded by max_chars so acceptable in practice.
                current_text = header + "\n".join(lines)
                if (
                    len(current_text)
                    > self.config.max_chars - self.config.truncation_buffer
                ):
                    break

        except Exception as e:
            lines.append(f"Error building structure: {e}")

        structure = header + "\n".join(lines)

        # Append exactly one truncation explanation, by priority.
        if self._file_count >= self.config.max_files:
            structure += f"\n... (truncated at {self.config.max_files} files limit)"
        elif (time.time() - self._start_time) > self.config.timeout_seconds:
            structure += (
                f"\n... (truncated due to {self.config.timeout_seconds}s timeout)"
            )
        elif len(structure) > self.config.max_chars:
            structure += f"\n... (truncated at {self.config.max_chars} characters)"

        return structure

    def get_git_status(self) -> str:
        """Summarize branch, working-tree status, and recent commits via git CLI.

        Returns a human-readable multi-line string; degrades to an explanatory
        message on timeout or when not inside a git repository.
        """
        try:
            # Git calls get a tighter cap than the directory scan.
            timeout = min(self.config.timeout_seconds, 10.0)
            num_commits = self.config.default_commit_count

            current_branch = subprocess.run(
                ["git", "branch", "--show-current"],
                capture_output=True,
                check=True,
                cwd=self.root_path,
                # stdin is closed on Windows to avoid console-handle issues;
                # inherited elsewhere.
                stdin=subprocess.DEVNULL if is_windows() else None,
                text=True,
                timeout=timeout,
            ).stdout.strip()

            # Guess the main branch: "master" only if origin/master exists.
            main_branch = "main"
            try:
                branches_output = subprocess.run(
                    ["git", "branch", "-r"],
                    capture_output=True,
                    check=True,
                    cwd=self.root_path,
                    stdin=subprocess.DEVNULL if is_windows() else None,
                    text=True,
                    timeout=timeout,
                ).stdout
                if "origin/master" in branches_output:
                    main_branch = "master"
            except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
                pass

            status_output = subprocess.run(
                ["git", "status", "--porcelain"],
                capture_output=True,
                check=True,
                cwd=self.root_path,
                stdin=subprocess.DEVNULL if is_windows() else None,
                text=True,
                timeout=timeout,
            ).stdout.strip()

            if status_output:
                status_lines = status_output.splitlines()
                MAX_GIT_STATUS_SIZE = 50
                # Beyond the cap, point the model at `git status` instead of
                # listing everything.
                if len(status_lines) > MAX_GIT_STATUS_SIZE:
                    status = (
                        f"({len(status_lines)} changes - use 'git status' for details)"
                    )
                else:
                    status = f"({len(status_lines)} changes)"
            else:
                status = "(clean)"

            log_output = subprocess.run(
                ["git", "log", "--oneline", f"-{num_commits}", "--decorate"],
                capture_output=True,
                check=True,
                cwd=self.root_path,
                stdin=subprocess.DEVNULL if is_windows() else None,
                text=True,
                timeout=timeout,
            ).stdout.strip()

            # Strip trailing "(decorations)" from each one-line commit subject.
            recent_commits = []
            for line in log_output.split("\n"):
                if not (line := line.strip()):
                    continue

                if " " in line:
                    commit_hash, commit_msg = line.split(" ", 1)
                    if (
                        "(" in commit_msg
                        and ")" in commit_msg
                        and (paren_index := commit_msg.rfind("(")) > 0
                    ):
                        commit_msg = commit_msg[:paren_index].strip()
                    recent_commits.append(f"{commit_hash} {commit_msg}")
                else:
                    recent_commits.append(line)

            git_info_parts = [
                f"Current branch: {current_branch}",
                f"Main branch (you will usually use this for PRs): {main_branch}",
                f"Status: {status}",
            ]

            if recent_commits:
                git_info_parts.append("Recent commits:")
                git_info_parts.extend(recent_commits)

            return "\n".join(git_info_parts)

        except subprocess.TimeoutExpired:
            return "Git operations timed out (large repository)"
        except subprocess.CalledProcessError:
            return "Not a git repository or git not available"
        except Exception as e:
            return f"Error getting git status: {e}"

    def get_full_context(self) -> str:
        """Combine directory structure and git status into the context prompt."""
        structure = self.get_directory_structure()
        git_status = self.get_git_status()

        large_repo_warning = ""
        if len(structure) >= self.config.max_chars - self.config.truncation_buffer:
            large_repo_warning = (
                f" Large repository detected - showing summary view with depth limit {self.config.max_depth}. 
" + f"Use the LS tool (passing a specific path), Bash tool, and other tools to explore nested directories in detail." + ) + + template = UtilityPrompt.PROJECT_CONTEXT.read() + return template.format( + large_repo_warning=large_repo_warning, + structure=structure, + abs_path=self.root_path, + git_status=git_status, + ) + + +def _get_platform_name() -> str: + platform_names = { + "win32": "Windows", + "darwin": "macOS", + "linux": "Linux", + "freebsd": "FreeBSD", + "openbsd": "OpenBSD", + "netbsd": "NetBSD", + } + return platform_names.get(sys.platform, "Unix-like") + + +def _get_default_shell() -> str: + """Get the default shell used by asyncio.create_subprocess_shell. + + On Unix, this is always 'sh'. + On Windows, this is COMSPEC or cmd.exe. + """ + if is_windows(): + return os.environ.get("COMSPEC", "cmd.exe") + return "sh" + + +def _get_os_system_prompt() -> str: + shell = _get_default_shell() + platform_name = _get_platform_name() + prompt = f"The operating system is {platform_name} with shell `{shell}`" + + if is_windows(): + prompt += "\n" + _get_windows_system_prompt() + return prompt + + +def _get_windows_system_prompt() -> str: + return ( + "### COMMAND COMPATIBILITY RULES (MUST FOLLOW):\n" + "- DO NOT use Unix commands like `ls`, `grep`, `cat` - they won't work on Windows\n" + "- Use: `dir` (Windows) for directory listings\n" + "- Use: backslashes (\\\\) for paths\n" + "- Check command availability with: `where command` (Windows)\n" + "- Script shebang: Not applicable on Windows\n" + "### ALWAYS verify commands work on the detected platform before suggesting them" + ) + + +def get_universal_system_prompt(tool_manager: ToolManager, config: VibeConfig) -> str: + sections = [config.system_prompt] + + if config.include_model_info: + sections.append(f"Your model name is: `{config.active_model}`") + + if config.include_prompt_detail: + sections.append(_get_os_system_prompt()) + tool_prompts = [] + active_tools = get_active_tool_classes(tool_manager, config) + 
for tool_class in active_tools: + if prompt := tool_class.get_tool_prompt(): + tool_prompts.append(prompt) + if tool_prompts: + sections.append("\n---\n".join(tool_prompts)) + + user_instructions = config.instructions.strip() or _load_user_instructions() + if user_instructions.strip(): + sections.append(user_instructions) + + if config.include_project_context: + is_dangerous, reason = is_dangerous_directory() + if is_dangerous: + template = UtilityPrompt.DANGEROUS_DIRECTORY.read() + context = template.format( + reason=reason.lower(), abs_path=Path(".").resolve() + ) + else: + context = ProjectContextProvider( + config=config.project_context, root_path=config.effective_workdir + ).get_full_context() + + sections.append(context) + + project_doc = _load_project_doc( + config.effective_workdir, config.project_context.max_doc_bytes + ) + if project_doc.strip(): + sections.append(project_doc) + + return "\n\n".join(sections) diff --git a/vibe/core/tools/base.py b/vibe/core/tools/base.py new file mode 100644 index 0000000..de4ff6f --- /dev/null +++ b/vibe/core/tools/base.py @@ -0,0 +1,281 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from enum import StrEnum, auto +import functools +import inspect +from pathlib import Path +import re +import sys +from typing import Any, ClassVar, cast, get_args, get_type_hints + +from pydantic import BaseModel, ConfigDict, Field, ValidationError, field_validator + +ARGS_COUNT = 4 + + +class ToolError(Exception): + """Raised when the tool encounters an unrecoverable problem.""" + + +class ToolInfo(BaseModel): + """Information about a tool. + + Attributes: + name: The name of the tool. + description: A brief description of what the tool does. + parameters: A dictionary of parameters required by the tool. 
+ """ + + name: str + description: str + parameters: dict[str, Any] + + +class ToolPermissionError(Exception): + """Raised when a tool permission is not allowed.""" + + +class ToolPermission(StrEnum): + ALWAYS = auto() + NEVER = auto() + ASK = auto() + + @classmethod + def by_name(cls, name: str) -> ToolPermission: + try: + return ToolPermission(name.upper()) + except ValueError: + raise ToolPermissionError( + f"Invalid tool permission: {name}. Must be one of {list(cls)}" + ) + + +class BaseToolConfig(BaseModel): + """Configuration for a tool. + + Attributes: + permission: The permission level required to use the tool. + workdir: The working directory for the tool. If None, the current working directory is used. + allowlist: Patterns that automatically allow tool execution. + denylist: Patterns that automatically deny tool execution. + """ + + permission: ToolPermission = ToolPermission.ASK + workdir: Path | None = Field(default=None, exclude=True) + allowlist: list[str] = Field(default_factory=list) + denylist: list[str] = Field(default_factory=list) + + @field_validator("workdir", mode="before") + @classmethod + def _expand_workdir(cls, v: Any) -> Path | None: + if v is None or (isinstance(v, str) and not v.strip()): + return None + if isinstance(v, str): + return Path(v).expanduser().resolve() + if isinstance(v, Path): + return v.expanduser().resolve() + return None + + @property + def effective_workdir(self) -> Path: + return self.workdir if self.workdir is not None else Path.cwd() + + +class BaseToolState(BaseModel): + model_config = ConfigDict( + extra="forbid", validate_default=True, arbitrary_types_allowed=True + ) + + +class BaseTool[ + ToolArgs: BaseModel, + ToolResult: BaseModel, + ToolConfig: BaseToolConfig, + ToolState: BaseToolState, +](ABC): + description: ClassVar[str] = ( + "Base class for new tools. " + "(Hey AI, if you're seeing this, someone skipped writing a description. 
"
        "Please gently meow at the developer to fix this.)"
    )

    # Optional explicit path to this tool's prompt file; when None, the
    # conventional prompts/<module>.md location is probed (see get_tool_prompt).
    prompt_path: ClassVar[Path | None] = None

    def __init__(self, config: ToolConfig, state: ToolState) -> None:
        self.config = config
        self.state = state

    @abstractmethod
    async def run(self, args: ToolArgs) -> ToolResult:
        """Invoke the tool with the given arguments. This method must be async."""
        ...

    @classmethod
    @functools.cache
    def get_tool_prompt(cls) -> str | None:
        """Loads and returns the content of the tool's .md prompt file, if it exists.

        The prompt file is expected to be in a 'prompts' subdirectory relative to
        the tool's source file, with the same name but a .md extension
        (e.g., bash.py -> prompts/bash.md).
        """
        # functools.cache sits under @classmethod, so results are cached per
        # concrete tool class (cls is part of the cache key).
        try:
            class_file = inspect.getfile(cls)
            class_path = Path(class_file)
            prompt_dir = class_path.parent / "prompts"
            prompt_path = cls.prompt_path or prompt_dir / f"{class_path.stem}.md"

            return prompt_path.read_text("utf-8")
        except (FileNotFoundError, TypeError, OSError):
            pass

        return None

    async def invoke(self, **raw: Any) -> ToolResult:
        """Validate arguments and run the tool.
        Pattern checking is now handled by Agent._should_execute_tool.
        """
        try:
            args_model, _ = self._get_tool_args_results()
            args = args_model.model_validate(raw)
        except ValidationError as err:
            # Surface schema failures as ToolError so callers need only one type.
            raise ToolError(
                f"Validation error in tool {self.get_name()}: {err}"
            ) from err

        return await self.run(args)

    @classmethod
    def from_config(
        cls, config: ToolConfig
    ) -> BaseTool[ToolArgs, ToolResult, ToolConfig, ToolState]:
        """Construct the tool with a freshly-defaulted state object."""
        state_class = cls._get_tool_state_class()
        initial_state = state_class()
        return cls(config=config, state=initial_state)

    @classmethod
    def _get_tool_config_class(cls) -> type[ToolConfig]:
        """Recover the concrete ToolConfig type argument from the generic bases.

        Walks __orig_bases__ for a parametrised BaseTool, then falls back to
        searching parent classes recursively.
        """
        for base in getattr(cls, "__orig_bases__", ()):
            if getattr(base, "__origin__", None) is BaseTool:
                type_args = get_args(base)
                if len(type_args) == ARGS_COUNT:
                    # Type parameter order: Args, Result, Config, State.
                    config_model = type_args[2]
                    if issubclass(config_model, BaseToolConfig):
                        return cast(type[ToolConfig], config_model)

        for base_class in cls.__bases__:
            if base_class is object or base_class is ABC:
                continue
            try:
                return base_class._get_tool_config_class()
            except (TypeError, AttributeError):
                continue

        raise TypeError(
            f"Could not determine ToolConfig for {cls.__name__}. "
            "Ensure it inherits from BaseTool with concrete type arguments."
        )

    @classmethod
    def _get_tool_state_class(cls) -> type[ToolState]:
        """Recover the concrete ToolState type argument (mirror of the config lookup)."""
        for base in getattr(cls, "__orig_bases__", ()):
            if getattr(base, "__origin__", None) is BaseTool:
                type_args = get_args(base)
                if len(type_args) == ARGS_COUNT:
                    state_model = type_args[3]
                    if issubclass(state_model, BaseToolState):
                        return cast(type[ToolState], state_model)

        for base_class in cls.__bases__:
            if base_class is object or base_class is ABC:
                continue
            try:
                return base_class._get_tool_state_class()
            except (TypeError, AttributeError):
                continue

        raise TypeError(
            f"Could not determine ToolState for {cls.__name__}. "
            "Ensure it inherits from BaseTool with concrete type arguments."
        )

    @classmethod
    def _get_tool_args_results(cls) -> tuple[type[ToolArgs], type[ToolResult]]:
        """Extract from the annotated signature of `run`.
        Works even when `from __future__ import annotations` is in effect.
        """
        run_fn = cls.run.__func__ if isinstance(cls.run, classmethod) else cls.run

        # Resolve string annotations against the defining module's globals.
        type_hints = get_type_hints(
            run_fn,
            globalns=vars(sys.modules[cls.__module__]),
            localns={cls.__name__: cls},
        )

        try:
            args_model = type_hints["args"]
            result_model = type_hints["return"]
        except KeyError as e:
            raise TypeError(
                f"{cls.__name__}.run must be annotated as "
                "`async def run(self, args: ToolArgs) -> ToolResult`"
            ) from e

        if not (
            issubclass(args_model, BaseModel) and issubclass(result_model, BaseModel)
        ):
            raise TypeError(
                f"{cls.__name__}.run annotations must be Pydantic models; "
                f"got {args_model!r}, {result_model!r}"
            )

        return cast(type[ToolArgs], args_model), cast(type[ToolResult], result_model)

    @classmethod
    def get_parameters(cls) -> dict[str, Any]:
        """Return a cleaned-up JSON-schema dict describing the arguments model
        with which this concrete tool was parametrised.
        """
        args_model, _ = cls._get_tool_args_results()
        schema = args_model.model_json_schema()
        # Drop pydantic's auto "title"/"description" noise at every level.
        schema.pop("title", None)
        schema.pop("description", None)

        if "properties" in schema:
            for prop_details in schema["properties"].values():
                prop_details.pop("title", None)

        if "$defs" in schema:
            for def_details in schema["$defs"].values():
                def_details.pop("title", None)
                if "properties" in def_details:
                    for prop_details in def_details["properties"].values():
                        prop_details.pop("title", None)

        return schema

    @classmethod
    def get_name(cls) -> str:
        """Derive the tool's public name from the class name (CamelCase -> snake_case)."""
        name = cls.__name__
        snake_case = re.sub(r"(?
BaseToolConfig: + config_class = cls._get_tool_config_class() + return config_class(permission=permission) + + def check_allowlist_denylist(self, args: ToolArgs) -> ToolPermission | None: + """Check if args match allowlist/denylist patterns. + + Returns: + ToolPermission.ALWAYS if allowlisted + ToolPermission.NEVER if denylisted + None if no match (proceed with normal permission check) + + Base implementation returns None. Override in subclasses for specific logic. + """ + return None diff --git a/vibe/core/tools/builtins/bash.py b/vibe/core/tools/builtins/bash.py new file mode 100644 index 0000000..70a520f --- /dev/null +++ b/vibe/core/tools/builtins/bash.py @@ -0,0 +1,285 @@ +from __future__ import annotations + +import asyncio +import os +import re +import signal +import sys +from typing import ClassVar, Literal, final + +from pydantic import BaseModel, Field + +from vibe.core.tools.base import ( + BaseTool, + BaseToolConfig, + BaseToolState, + ToolError, + ToolPermission, +) +from vibe.core.utils import is_windows + + +def _get_subprocess_encoding() -> str: + if sys.platform == "win32": + # Windows console uses OEM code page (e.g., cp850, cp1252) + import ctypes + + return f"cp{ctypes.windll.kernel32.GetOEMCP()}" + return "utf-8" + + +def _get_base_env() -> dict[str, str]: + base_env = { + **os.environ, + "CI": "true", + "NONINTERACTIVE": "1", + "NO_TTY": "1", + "NO_COLOR": "1", + } + + if is_windows(): + base_env["GIT_PAGER"] = "more" + base_env["PAGER"] = "more" + else: + base_env["TERM"] = "dumb" + base_env["DEBIAN_FRONTEND"] = "noninteractive" + base_env["GIT_PAGER"] = "cat" + base_env["PAGER"] = "cat" + base_env["LESS"] = "-FX" + base_env["LC_ALL"] = "en_US.UTF-8" + + return base_env + + +async def _kill_process_tree(proc: asyncio.subprocess.Process) -> None: + if proc.returncode is not None: + return + + try: + if sys.platform == "win32": + try: + subprocess_proc = await asyncio.create_subprocess_exec( + "taskkill", + "/F", + "/T", + "/PID", + 
def _get_default_denylist_standalone() -> list[str]:
    """Return commands denied only when invoked with no arguments.

    Bare interpreters and editors would open an interactive session and hang
    a non-interactive runner, so they are blocked unless given arguments.
    """
    denied = ["python", "python3", "ipython"]
    if is_windows():
        denied += ["cmd", "powershell", "pwsh", "notepad"]
    else:
        denied += ["bash", "sh", "nohup", "vi", "vim", "emacs", "nano", "su"]
    return denied
+ ) + allowlist: list[str] = Field( + default_factory=_get_default_allowlist, + description="Command prefixes that are automatically allowed", + ) + denylist: list[str] = Field( + default_factory=_get_default_denylist, + description="Command prefixes that are automatically denied", + ) + denylist_standalone: list[str] = Field( + default_factory=_get_default_denylist_standalone, + description="Commands that are denied only when run without arguments", + ) + + +class BashArgs(BaseModel): + command: str + timeout: int | None = Field( + default=None, description="Override the default command timeout." + ) + + +class BashResult(BaseModel): + stdout: str + stderr: str + returncode: int + + +class Bash(BaseTool[BashArgs, BashResult, BashToolConfig, BaseToolState]): + description: ClassVar[str] = "Run a one-off bash command and capture its output." + + def check_allowlist_denylist(self, args: BashArgs) -> ToolPermission | None: + command_parts = re.split(r"(?:&&|\|\||;|\|)", args.command) + command_parts = [part.strip() for part in command_parts if part.strip()] + + if not command_parts: + return None + + def is_denylisted(command: str) -> bool: + return any(command.startswith(pattern) for pattern in self.config.denylist) + + def is_standalone_denylisted(command: str) -> bool: + parts = command.split() + if not parts: + return False + + base_command = parts[0] + has_args = len(parts) > 1 + + if not has_args: + command_name = os.path.basename(base_command) + if command_name in self.config.denylist_standalone: + return True + if base_command in self.config.denylist_standalone: + return True + + return False + + def is_allowlisted(command: str) -> bool: + return any(command.startswith(pattern) for pattern in self.config.allowlist) + + for part in command_parts: + if is_denylisted(part): + return ToolPermission.NEVER + if is_standalone_denylisted(part): + return ToolPermission.NEVER + + if all(is_allowlisted(part) for part in command_parts): + return ToolPermission.ALWAYS + + 
return None + + @final + def _build_timeout_error(self, command: str, timeout: int) -> ToolError: + return ToolError(f"Command timed out after {timeout}s: {command!r}") + + @final + def _build_result( + self, *, command: str, stdout: str, stderr: str, returncode: int + ) -> BashResult: + if returncode != 0: + error_msg = f"Command failed: {command!r}\n" + error_msg += f"Return code: {returncode}" + if stderr: + error_msg += f"\nStderr: {stderr}" + if stdout: + error_msg += f"\nStdout: {stdout}" + raise ToolError(error_msg.strip()) + + return BashResult(stdout=stdout, stderr=stderr, returncode=returncode) + + async def run(self, args: BashArgs) -> BashResult: + timeout = args.timeout or self.config.default_timeout + max_bytes = self.config.max_output_bytes + + proc = None + try: + # start_new_session is Unix-only, on Windows it's ignored + kwargs: dict[Literal["start_new_session"], bool] = ( + {} if is_windows() else {"start_new_session": True} + ) + + proc = await asyncio.create_subprocess_shell( + args.command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + stdin=asyncio.subprocess.DEVNULL, + cwd=self.config.effective_workdir, + env=_get_base_env(), + **kwargs, + ) + + try: + stdout_bytes, stderr_bytes = await asyncio.wait_for( + proc.communicate(), timeout=timeout + ) + except TimeoutError: + await _kill_process_tree(proc) + raise self._build_timeout_error(args.command, timeout) + + encoding = _get_subprocess_encoding() + stdout = ( + stdout_bytes.decode(encoding, errors="replace")[:max_bytes] + if stdout_bytes + else "" + ) + stderr = ( + stderr_bytes.decode(encoding, errors="replace")[:max_bytes] + if stderr_bytes + else "" + ) + + returncode = proc.returncode or 0 + + return self._build_result( + command=args.command, + stdout=stdout, + stderr=stderr, + returncode=returncode, + ) + + except (ToolError, asyncio.CancelledError): + raise + except Exception as exc: + raise ToolError(f"Error running command {args.command!r}: {exc}") from exc 
+ finally: + if proc is not None: + await _kill_process_tree(proc) diff --git a/vibe/core/tools/builtins/grep.py b/vibe/core/tools/builtins/grep.py new file mode 100644 index 0000000..3ab01e4 --- /dev/null +++ b/vibe/core/tools/builtins/grep.py @@ -0,0 +1,326 @@ +from __future__ import annotations + +import asyncio +from enum import StrEnum, auto +from pathlib import Path +import shutil +from typing import TYPE_CHECKING, ClassVar + +from pydantic import BaseModel, Field + +from vibe.core.tools.base import ( + BaseTool, + BaseToolConfig, + BaseToolState, + ToolError, + ToolPermission, +) +from vibe.core.tools.ui import ToolCallDisplay, ToolResultDisplay, ToolUIData + +if TYPE_CHECKING: + from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class GrepBackend(StrEnum): + RIPGREP = auto() + GNU_GREP = auto() + + +class GrepToolConfig(BaseToolConfig): + permission: ToolPermission = ToolPermission.ALWAYS + + max_output_bytes: int = Field( + default=64_000, description="Hard cap for the total size of matched lines." + ) + default_max_matches: int = Field( + default=100, description="Default maximum number of matches to return." + ) + default_timeout: int = Field( + default=60, description="Default timeout for the search command in seconds." 
class GrepArgs(BaseModel):
    """Arguments accepted by the grep tool."""

    # Regex pattern passed through to ripgrep/grep unchanged.
    pattern: str
    # Directory or file to search; relative paths resolve against the workdir.
    path: str = "."
    max_matches: int | None = Field(
        default=None, description="Override the default maximum number of matches."
    )
    use_default_ignore: bool = Field(
        default=True, description="Whether to respect .gitignore and .ignore files."
    )
" + "Please install ripgrep: https://github.com/BurntSushi/ripgrep#installation" + ) + + async def run(self, args: GrepArgs) -> GrepResult: + backend = self._detect_backend() + self._validate_args(args) + self.state.search_history.append(args.pattern) + + exclude_patterns = self._collect_exclude_patterns() + cmd = self._build_command(args, exclude_patterns, backend) + stdout = await self._execute_search(cmd) + + return self._parse_output( + stdout, args.max_matches or self.config.default_max_matches + ) + + def _validate_args(self, args: GrepArgs) -> None: + if not args.pattern.strip(): + raise ToolError("Empty search pattern provided.") + + path_obj = Path(args.path).expanduser() + if not path_obj.is_absolute(): + path_obj = self.config.effective_workdir / path_obj + + if not path_obj.exists(): + raise ToolError(f"Path does not exist: {args.path}") + + def _collect_exclude_patterns(self) -> list[str]: + patterns = list(self.config.exclude_patterns) + + codeignore_path = self.config.effective_workdir / self.config.codeignore_file + if codeignore_path.is_file(): + patterns.extend(self._load_codeignore_patterns(codeignore_path)) + + return patterns + + def _load_codeignore_patterns(self, codeignore_path: Path) -> list[str]: + patterns = [] + try: + content = codeignore_path.read_text("utf-8") + for line in content.splitlines(): + line = line.strip() + if line and not line.startswith("#"): + patterns.append(line) + except OSError: + pass + + return patterns + + def _build_command( + self, args: GrepArgs, exclude_patterns: list[str], backend: GrepBackend + ) -> list[str]: + if backend == GrepBackend.RIPGREP: + return self._build_ripgrep_command(args, exclude_patterns) + return self._build_gnu_grep_command(args, exclude_patterns) + + def _build_ripgrep_command( + self, args: GrepArgs, exclude_patterns: list[str] + ) -> list[str]: + max_matches = args.max_matches or self.config.default_max_matches + + cmd = [ + "rg", + "--line-number", + "--no-heading", + "--smart-case", 
+ "--no-binary", + # Request one extra to detect truncation + "--max-count", + str(max_matches + 1), + ] + + if not args.use_default_ignore: + cmd.append("--no-ignore") + + for pattern in exclude_patterns: + cmd.extend(["--glob", f"!{pattern}"]) + + cmd.extend(["-e", args.pattern, args.path]) + + return cmd + + def _build_gnu_grep_command( + self, args: GrepArgs, exclude_patterns: list[str] + ) -> list[str]: + max_matches = args.max_matches or self.config.default_max_matches + + cmd = ["grep", "-r", "-n", "-I", "-E", f"--max-count={max_matches + 1}"] + + if args.pattern.islower(): + cmd.append("-i") + + for pattern in exclude_patterns: + if pattern.endswith("/"): + dir_pattern = pattern.rstrip("/") + cmd.append(f"--exclude-dir={dir_pattern}") + else: + cmd.append(f"--exclude={pattern}") + + cmd.extend(["-e", args.pattern, args.path]) + + return cmd + + async def _execute_search(self, cmd: list[str]) -> str: + try: + proc = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=str(self.config.effective_workdir), + ) + + try: + stdout_bytes, stderr_bytes = await asyncio.wait_for( + proc.communicate(), timeout=self.config.default_timeout + ) + except TimeoutError: + proc.kill() + await proc.wait() + raise ToolError( + f"Search timed out after {self.config.default_timeout}s" + ) + + stdout = ( + stdout_bytes.decode("utf-8", errors="ignore") if stdout_bytes else "" + ) + stderr = ( + stderr_bytes.decode("utf-8", errors="ignore") if stderr_bytes else "" + ) + + if proc.returncode not in {0, 1}: + error_msg = stderr or f"Process exited with code {proc.returncode}" + raise ToolError(f"grep error: {error_msg}") + + return stdout + + except ToolError: + raise + except Exception as exc: + raise ToolError(f"Error running grep: {exc}") from exc + + def _parse_output(self, stdout: str, max_matches: int) -> GrepResult: + output_lines = stdout.splitlines() if stdout else [] + + truncated_lines = 
output_lines[:max_matches] + truncated_output = "\n".join(truncated_lines) + + was_truncated = ( + len(output_lines) > max_matches + or len(truncated_output) > self.config.max_output_bytes + ) + + final_output = truncated_output[: self.config.max_output_bytes] + + return GrepResult( + matches=final_output, + match_count=len(truncated_lines), + was_truncated=was_truncated, + ) + + @classmethod + def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay: + if not isinstance(event.args, GrepArgs): + return ToolCallDisplay(summary="grep") + + summary = f"grep: '{event.args.pattern}'" + if event.args.path != ".": + summary += f" in {event.args.path}" + if event.args.max_matches: + summary += f" (max {event.args.max_matches} matches)" + if not event.args.use_default_ignore: + summary += " [no-ignore]" + + return ToolCallDisplay( + summary=summary, + details={ + "pattern": event.args.pattern, + "path": event.args.path, + "max_matches": event.args.max_matches, + "use_default_ignore": event.args.use_default_ignore, + }, + ) + + @classmethod + def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay: + if not isinstance(event.result, GrepResult): + return ToolResultDisplay( + success=False, message=event.error or event.skip_reason or "No result" + ) + + message = f"Found {event.result.match_count} matches" + if event.result.was_truncated: + message += " (truncated)" + + warnings = [] + if event.result.was_truncated: + warnings.append("Output was truncated due to size/match limits") + + return ToolResultDisplay( + success=True, + message=message, + warnings=warnings, + details={ + "match_count": event.result.match_count, + "was_truncated": event.result.was_truncated, + "matches": event.result.matches, + }, + ) + + @classmethod + def get_status_text(cls) -> str: + """Return status message for spinner.""" + return "Searching files" diff --git a/vibe/core/tools/builtins/prompts/__init__.py b/vibe/core/tools/builtins/prompts/__init__.py new file mode 
100644 index 0000000..e69de29 diff --git a/vibe/core/tools/builtins/prompts/bash.md b/vibe/core/tools/builtins/prompts/bash.md new file mode 100644 index 0000000..665e58d --- /dev/null +++ b/vibe/core/tools/builtins/prompts/bash.md @@ -0,0 +1,68 @@ +Use the `bash` tool to run one-off shell commands. + +**Key characteristics:** +- **Stateless**: Each command runs independently in a fresh environment + +**IMPORTANT: Use dedicated tools if available instead of these bash commands:** + +**File Operations - DO NOT USE:** +- `cat filename` → Use `read_file(path="filename")` +- `head -n 20 filename` → Use `read_file(path="filename", limit=20)` +- `tail -n 20 filename` → Read with offset: `read_file(path="filename", offset=<total_lines - 20>, limit=20)` +- `sed -n '100,200p' filename` → Use `read_file(path="filename", offset=99, limit=101)` +- `less`, `more`, `vim`, `nano` → Use `read_file` with offset/limit for navigation +- `echo "content" > file` → Use `write_file(path="file", content="content")` +- `echo "content" >> file` → Read first, then `write_file` with overwrite=true + +**Search Operations - DO NOT USE:** +- `grep -r "pattern" .` → Use `grep(pattern="pattern", path=".")` +- `find . 
-name "*.py"` → Use `bash("ls -la")` for current dir or `grep` with appropriate pattern +- `ag`, `ack`, `rg` commands → Use the `grep` tool +- `locate` → Use `grep` tool + +**File Modification - DO NOT USE:** +- `sed -i 's/old/new/g' file` → Use `search_replace` tool +- `awk` for file editing → Use `search_replace` tool +- Any in-place file editing → Use `search_replace` tool + +**APPROPRIATE bash uses:** +- System information: `pwd`, `whoami`, `date`, `uname -a` +- Directory listings: `ls -la`, `tree` (if available) +- Git operations: `git status`, `git log --oneline -10`, `git diff` +- Process info: `ps aux | grep process`, `top -n 1` +- Network checks: `ping -c 1 google.com`, `curl -I https://example.com` +- Package management: `pip list`, `npm list` +- Environment checks: `env | grep VAR`, `which python` +- File metadata: `stat filename`, `file filename`, `wc -l filename` + +**Example: Reading a large file efficiently** + +WRONG: +```bash +bash("cat large_file.txt") # May hit size limits +bash("head -1000 large_file.txt") # Inefficient +``` + +RIGHT: +```python +# First chunk +read_file(path="large_file.txt", limit=1000) +# If was_truncated=true, read next chunk +read_file(path="large_file.txt", offset=1000, limit=1000) +``` + +**Example: Searching for patterns** + +WRONG: +```bash +bash("grep -r 'TODO' src/") # Don't use bash for grep +bash("find . -type f -name '*.py' | xargs grep 'import'") # Too complex +``` + +RIGHT: +```python +grep(pattern="TODO", path="src/") +grep(pattern="import", path=".") +``` + +**Remember:** Bash is best for quick system checks and git operations. For file operations, searching, and editing, always use the dedicated tools when they are available. 
diff --git a/vibe/core/tools/builtins/prompts/grep.md b/vibe/core/tools/builtins/prompts/grep.md new file mode 100644 index 0000000..2c7a31c --- /dev/null +++ b/vibe/core/tools/builtins/prompts/grep.md @@ -0,0 +1,4 @@ +Use `grep` to recursively search for a regular expression pattern in files. + +- It's very fast and automatically ignores files that you should not read like .pyc files, .venv directories, etc. +- Use this to find where functions are defined, how variables are used, or to locate specific error messages. diff --git a/vibe/core/tools/builtins/prompts/read_file.md b/vibe/core/tools/builtins/prompts/read_file.md new file mode 100644 index 0000000..ccbe656 --- /dev/null +++ b/vibe/core/tools/builtins/prompts/read_file.md @@ -0,0 +1,13 @@ +Use `read_file` to read the content of a file. It's designed to handle large files safely. + +- By default, it reads from the beginning of the file. +- Use `offset` (line number) and `limit` (number of lines) to read specific parts or chunks of a file. This is efficient for exploring large files. +- The result includes `was_truncated: true` if the file content was cut short due to size limits. + +**Strategy for large files:** + +1. Call `read_file` with a `limit` (e.g., 1000 lines) to get the start of the file. +2. If `was_truncated` is true, you know the file is large. +3. To read the next chunk, call `read_file` again with an `offset`. For example, `offset=1000, limit=1000`. + +This is more efficient than using `bash` with `cat` or `wc`. diff --git a/vibe/core/tools/builtins/prompts/search_replace.md b/vibe/core/tools/builtins/prompts/search_replace.md new file mode 100644 index 0000000..9ce056a --- /dev/null +++ b/vibe/core/tools/builtins/prompts/search_replace.md @@ -0,0 +1,43 @@ +Use `search_replace` to make targeted changes to files using SEARCH/REPLACE blocks. This tool finds exact text matches and replaces them. 
+ +Arguments: +- `file_path`: The path to the file to modify +- `content`: The SEARCH/REPLACE blocks defining the changes + +The content format is: + +``` +<<<<<<< SEARCH +[exact text to find in the file] +======= +[exact text to replace it with] +>>>>>>> REPLACE +``` + +You can include multiple SEARCH/REPLACE blocks to make multiple changes to the same file: + +``` +<<<<<<< SEARCH +def old_function(): + return "old value" +======= +def new_function(): + return "new value" +>>>>>>> REPLACE + +<<<<<<< SEARCH +import os +======= +import os +import sys +>>>>>>> REPLACE +``` + +IMPORTANT: + +- The SEARCH text must match EXACTLY (including whitespace, indentation, and line endings) +- The SEARCH text must appear exactly once in the file - if it appears multiple times, the tool will error +- Use at least 5 equals signs (=====) between SEARCH and REPLACE sections +- The tool will provide detailed error messages showing context if search text is not found +- Each search/replace block is applied in order, so later blocks see the results of earlier ones +- Be careful with escape sequences in string literals - use \n not \\n for newlines in code diff --git a/vibe/core/tools/builtins/prompts/todo.md b/vibe/core/tools/builtins/prompts/todo.md new file mode 100644 index 0000000..2463db5 --- /dev/null +++ b/vibe/core/tools/builtins/prompts/todo.md @@ -0,0 +1,199 @@ +Use the `todo` tool to manage a simple task list. This tool helps you track tasks and their progress. + +## How it works + +- **Reading:** Use `action: "read"` to view the current todo list +- **Writing:** Use `action: "write"` with the complete `todos` list to update. You must provide the ENTIRE list - this replaces everything. 
+ +## Todo Structure +Each todo item has: +- `id`: A unique string identifier (e.g., "1", "2", "task-a") +- `content`: The task description +- `status`: One of: "pending", "in_progress", "completed", "cancelled" +- `priority`: One of: "high", "medium", "low" + +## When to Use This Tool + +**Use proactively for:** +- Complex multi-step tasks (3+ distinct steps) +- Non-trivial tasks requiring careful planning +- Multiple tasks provided by the user (numbered or comma-separated) +- Tracking progress on ongoing work +- After receiving new instructions - immediately capture requirements +- When starting work - mark task as in_progress BEFORE beginning +- After completing work - mark as completed and add any follow-up tasks discovered + +**Skip this tool for:** +- Single, straightforward tasks +- Trivial operations (< 3 simple steps) +- Purely conversational or informational requests +- Tasks that provide no organizational benefit + +## Task Management Best Practices + +1. **Status Management:** + - Only ONE task should be `in_progress` at a time + - Mark tasks `in_progress` BEFORE starting work on them + - Mark tasks `completed` IMMEDIATELY after finishing + - Keep tasks `in_progress` if blocked or encountering errors + +2. **Task Completion Rules:** + - ONLY mark as `completed` when FULLY accomplished + - Never mark complete if tests are failing, implementation is partial, or errors are unresolved + - When blocked, create a new task describing what needs resolution + +3. 
**Task Organization:** + - Create specific, actionable items + - Break complex tasks into manageable steps + - Use clear, descriptive task names + - Remove irrelevant tasks entirely (don't just mark cancelled) + +## Examples + +**Example 1: Reading todos** +```json +{ + "action": "read" +} +``` + +**Example 2: Initial task creation (user requests multiple features)** +```json +{ + "action": "write", + "todos": [ + { + "id": "1", + "content": "Add dark mode toggle to settings", + "status": "pending", + "priority": "high" + }, + { + "id": "2", + "content": "Implement theme context/state management", + "status": "pending", + "priority": "high" + }, + { + "id": "3", + "content": "Update components for theme switching", + "status": "pending", + "priority": "medium" + }, + { + "id": "4", + "content": "Run tests and verify build", + "status": "pending", + "priority": "medium" + } + ] +} +``` + +**Example 3: Starting work (marking one task in_progress)** +```json +{ + "action": "write", + "todos": [ + { + "id": "1", + "content": "Add dark mode toggle to settings", + "status": "in_progress", + "priority": "high" + }, + { + "id": "2", + "content": "Implement theme context/state management", + "status": "pending", + "priority": "high" + }, + { + "id": "3", + "content": "Update components for theme switching", + "status": "pending", + "priority": "medium" + }, + { + "id": "4", + "content": "Run tests and verify build", + "status": "pending", + "priority": "medium" + } + ] +} +``` + +**Example 4: Completing task and adding discovered subtask** +```json +{ + "action": "write", + "todos": [ + { + "id": "1", + "content": "Add dark mode toggle to settings", + "status": "completed", + "priority": "high" + }, + { + "id": "2", + "content": "Implement theme context/state management", + "status": "in_progress", + "priority": "high" + }, + { + "id": "3", + "content": "Update components for theme switching", + "status": "pending", + "priority": "medium" + }, + { + "id": "4", + "content": 
"Fix TypeScript errors in theme types", + "status": "pending", + "priority": "high" + }, + { + "id": "5", + "content": "Run tests and verify build", + "status": "pending", + "priority": "medium" + } + ] +} +``` + +**Example 5: Handling blockers (keeping task in_progress)** +```json +{ + "action": "write", + "todos": [ + { + "id": "1", + "content": "Deploy to production", + "status": "in_progress", + "priority": "high" + }, + { + "id": "2", + "content": "BLOCKER: Fix failing deployment pipeline", + "status": "pending", + "priority": "high" + }, + { + "id": "3", + "content": "Update documentation", + "status": "pending", + "priority": "low" + } + ] +} +``` + +## Common Scenarios + +**Multi-file refactoring:** Create todos for each file that needs updating +**Performance optimization:** List specific bottlenecks as individual tasks +**Bug fixing:** Track reproduction, diagnosis, fix, and verification as separate tasks +**Feature implementation:** Break down into UI, logic, tests, and documentation tasks + +Remember: When writing, you must include ALL todos you want to keep. Any todo not in the list will be removed. Be proactive with task management to demonstrate thoroughness and ensure all requirements are completed successfully. diff --git a/vibe/core/tools/builtins/prompts/write_file.md b/vibe/core/tools/builtins/prompts/write_file.md new file mode 100644 index 0000000..b3859b5 --- /dev/null +++ b/vibe/core/tools/builtins/prompts/write_file.md @@ -0,0 +1,42 @@ +Use `write_file` to write content to a file. 
+ +**Arguments:** +- `path`: The file path (relative or absolute) +- `content`: The content to write to the file +- `overwrite`: Must be set to `true` to overwrite an existing file (default: `false`) + +**IMPORTANT SAFETY RULES:** + +- By default, the tool will **fail if the file already exists** to prevent accidental data loss +- To **overwrite** an existing file, you **MUST** set `overwrite: true` +- To **create a new file**, just provide the `path` and `content` (overwrite defaults to false) +- If parent directories don't exist, they will be created automatically + +**BEST PRACTICES:** + +- **ALWAYS** use the `read_file` tool first before overwriting an existing file to understand its current contents +- **ALWAYS** prefer using `search_replace` to edit existing files rather than overwriting them completely +- **NEVER** write new files unless explicitly required - prefer modifying existing files +- **NEVER** proactively create documentation files (*.md) or README files unless explicitly requested +- **AVOID** using emojis in file content unless the user explicitly requests them + +**Usage Examples:** + +```python +# Create a new file (will error if file exists) +write_file( + path="src/new_module.py", + content="def hello():\n return 'Hello World'" +) + +# Overwrite an existing file (must read it first!) +# First: read_file(path="src/existing.py") +# Then: +write_file( + path="src/existing.py", + content="# Updated content\ndef new_function():\n pass", + overwrite=True +) +``` + +**Remember:** For editing existing files, prefer `search_replace` over `write_file` to preserve unchanged portions and avoid accidental data loss. 
class _ReadResult(NamedTuple):
    """Internal result of a streamed file read."""

    # Lines returned to the caller (newline characters preserved).
    lines: list[str]
    # Total UTF-8 bytes consumed by the returned lines.
    bytes_read: int
    # True when reading stopped early because of the byte budget.
    was_truncated: bool
+ ) + + @final + async def run(self, args: ReadFileArgs) -> ReadFileResult: + file_path = self._prepare_and_validate_path(args) + + read_result = await self._read_file(args, file_path) + + self._update_state_history(file_path) + + return ReadFileResult( + path=str(file_path), + content="".join(read_result.lines), + lines_read=len(read_result.lines), + was_truncated=read_result.was_truncated, + ) + + def check_allowlist_denylist(self, args: ReadFileArgs) -> ToolPermission | None: + import fnmatch + + file_path = Path(args.path).expanduser() + if not file_path.is_absolute(): + file_path = self.config.effective_workdir / file_path + file_str = str(file_path) + + for pattern in self.config.denylist: + if fnmatch.fnmatch(file_str, pattern): + return ToolPermission.NEVER + + for pattern in self.config.allowlist: + if fnmatch.fnmatch(file_str, pattern): + return ToolPermission.ALWAYS + + return None + + def _prepare_and_validate_path(self, args: ReadFileArgs) -> Path: + self._validate_inputs(args) + + file_path = Path(args.path).expanduser() + if not file_path.is_absolute(): + file_path = self.config.effective_workdir / file_path + + self._validate_path(file_path) + return file_path + + async def _read_file(self, args: ReadFileArgs, file_path: Path) -> _ReadResult: + try: + lines_to_return: list[str] = [] + bytes_read = 0 + was_truncated = False + + async with aiofiles.open(file_path, encoding="utf-8", errors="ignore") as f: + line_index = 0 + async for line in f: + if line_index < args.offset: + line_index += 1 + continue + + if args.limit is not None and len(lines_to_return) >= args.limit: + break + + line_bytes = len(line.encode("utf-8")) + if bytes_read + line_bytes > self.config.max_read_bytes: + was_truncated = True + break + + lines_to_return.append(line) + bytes_read += line_bytes + line_index += 1 + + return _ReadResult( + lines=lines_to_return, + bytes_read=bytes_read, + was_truncated=was_truncated, + ) + + except OSError as exc: + raise ToolError(f"Error 
reading {file_path}: {exc}") from exc + + def _validate_inputs(self, args: ReadFileArgs) -> None: + if not args.path.strip(): + raise ToolError("Path cannot be empty") + if args.offset < 0: + raise ToolError("Offset cannot be negative") + if args.limit is not None and args.limit <= 0: + raise ToolError("Limit, if provided, must be a positive number") + + def _validate_path(self, file_path: Path) -> None: + try: + resolved_path = file_path.resolve() + except ValueError: + raise ToolError( + f"Security error: Cannot read path '{file_path}' outside of the project directory '{self.config.effective_workdir}'." + ) + except FileNotFoundError: + raise ToolError(f"File not found at: {file_path}") + + if not resolved_path.exists(): + raise ToolError(f"File not found at: {file_path}") + if resolved_path.is_dir(): + raise ToolError(f"Path is a directory, not a file: {file_path}") + + def _update_state_history(self, file_path: Path) -> None: + self.state.recently_read_files.append(str(file_path.resolve())) + if len(self.state.recently_read_files) > self.config.max_state_history: + self.state.recently_read_files.pop(0) + + @classmethod + def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay: + if not isinstance(event.args, ReadFileArgs): + return ToolCallDisplay(summary="read_file") + + summary = f"read_file: {event.args.path}" + if event.args.offset > 0 or event.args.limit is not None: + parts = [] + if event.args.offset > 0: + parts.append(f"from line {event.args.offset}") + if event.args.limit is not None: + parts.append(f"limit {event.args.limit} lines") + summary += f" ({', '.join(parts)})" + + return ToolCallDisplay( + summary=summary, + details={ + "path": event.args.path, + "offset": event.args.offset, + "limit": event.args.limit, + }, + ) + + @classmethod + def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay: + if not isinstance(event.result, ReadFileResult): + return ToolResultDisplay( + success=False, message=event.error or 
event.skip_reason or "No result" + ) + + path_obj = Path(event.result.path) + message = f"Read {event.result.lines_read} line{'' if event.result.lines_read <= 1 else 's'} from {path_obj.name}" + if event.result.was_truncated: + message += " (truncated)" + + return ToolResultDisplay( + success=True, + message=message, + warnings=["File was truncated due to size limit"] + if event.result.was_truncated + else [], + details={ + "path": str(event.result.path), + "lines_read": event.result.lines_read, + "was_truncated": event.result.was_truncated, + "content": event.result.content, + "file_extension": path_obj.suffix.lstrip(".") + if path_obj.suffix + else "text", + }, + ) + + @classmethod + def get_status_text(cls) -> str: + return "Reading file" diff --git a/vibe/core/tools/builtins/search_replace.py b/vibe/core/tools/builtins/search_replace.py new file mode 100644 index 0000000..238ad02 --- /dev/null +++ b/vibe/core/tools/builtins/search_replace.py @@ -0,0 +1,454 @@ +from __future__ import annotations + +import difflib +from pathlib import Path +import re +import shutil +from typing import ClassVar, NamedTuple, final + +import aiofiles +from pydantic import BaseModel, Field + +from vibe.core.tools.base import BaseTool, BaseToolConfig, BaseToolState, ToolError +from vibe.core.tools.ui import ToolCallDisplay, ToolResultDisplay, ToolUIData +from vibe.core.types import ToolCallEvent, ToolResultEvent + +_BLOCK_RE = re.compile( + r"<{5,} SEARCH\r?\n(.*?)\r?\n?={5,}\r?\n(.*?)\r?\n?>{5,} REPLACE", flags=re.DOTALL +) + +_BLOCK_WITH_FENCE_RE = re.compile( + r"```[\s\S]*?\n<{5,} SEARCH\r?\n(.*?)\r?\n?={5,}\r?\n(.*?)\r?\n?>{5,} REPLACE\s*\n```", + flags=re.DOTALL, +) + + +class SearchReplaceBlock(NamedTuple): + search: str + replace: str + + +class FuzzyMatch(NamedTuple): + similarity: float + start_line: int + end_line: int + text: str + + +class BlockApplyResult(NamedTuple): + content: str + applied: int + errors: list[str] + warnings: list[str] + + +class 
class SearchReplaceArgs(BaseModel):
    # Target file (absolute or relative to the effective workdir) and the raw
    # text containing one or more SEARCH/REPLACE blocks.
    file_path: str
    content: str


class SearchReplaceResult(BaseModel):
    file: str
    blocks_applied: int
    # NOTE(review): this is the NET line-count delta (new - old), not the
    # number of edited lines; a same-size edit reports 0 — confirm intended.
    lines_changed: int
    # Echoes the raw patch text that was applied, not the new file content.
    content: str
    warnings: list[str] = Field(default_factory=list)


class SearchReplaceConfig(BaseToolConfig):
    max_content_size: int = 100_000
    create_backup: bool = False
    # Minimum SequenceMatcher ratio for reporting a fuzzy "closest match".
    fuzzy_threshold: float = 0.9


class SearchReplaceState(BaseToolState):
    pass


class SearchReplace(
    BaseTool[
        SearchReplaceArgs, SearchReplaceResult, SearchReplaceConfig, SearchReplaceState
    ],
    ToolUIData[SearchReplaceArgs, SearchReplaceResult],
):
    """Apply SEARCH/REPLACE patch blocks to a file, with fuzzy-match diagnostics."""

    description: ClassVar[str] = (
        "Replace sections of files using SEARCH/REPLACE blocks. "
        "Supports fuzzy matching and detailed error reporting. "
        "Format: <<<<<<< SEARCH\\n[text]\\n=======\\n[replacement]\\n>>>>>>> REPLACE"
    )

    @classmethod
    def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay:
        """Summarize the patch: target file and number of parsed blocks."""
        if not isinstance(event.args, SearchReplaceArgs):
            return ToolCallDisplay(summary="Invalid arguments")

        args = event.args
        blocks = cls._parse_search_replace_blocks(args.content)

        return ToolCallDisplay(
            summary=f"Patching {args.file_path} ({len(blocks)} blocks)",
            content=args.content,
            details={
                "path": args.file_path,
                "blocks_count": len(blocks),
                "original_path": args.file_path,
            },
        )

    @classmethod
    def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay:
        """Render the apply outcome; falls back to a generic success message."""
        if isinstance(event.result, SearchReplaceResult):
            return ToolResultDisplay(
                success=True,
                message=f"Applied {event.result.blocks_applied} blocks",
                warnings=event.result.warnings,
                details={
                    "lines_changed": event.result.lines_changed,
                    "content": event.result.content,
                },
            )

        return ToolResultDisplay(success=True, message="Patch applied")

    @classmethod
    def get_status_text(cls) -> str:
        return "Editing files"

    @final
    async def run(self, args: SearchReplaceArgs) -> SearchReplaceResult:
        """Validate, apply all blocks in order, then write the result back.

        Raises:
            ToolError: if any block's search text cannot be located (the file
            is left untouched in that case — nothing is written on error).
        """
        file_path, search_replace_blocks = self._prepare_and_validate_args(args)

        original_content = await self._read_file(file_path)

        block_result = self._apply_blocks(
            original_content,
            search_replace_blocks,
            file_path,
            self.config.fuzzy_threshold,
        )

        # All-or-nothing error reporting: collect every failed block before
        # raising so the model can fix them in one round trip.
        if block_result.errors:
            error_message = "SEARCH/REPLACE blocks failed:\n" + "\n\n".join(
                block_result.errors
            )
            if block_result.warnings:
                error_message += "\n\nWarnings encountered:\n" + "\n".join(
                    block_result.warnings
                )
            raise ToolError(error_message)

        modified_content = block_result.content

        # Calculate line changes
        # NOTE(review): net delta, not edited-line count (see SearchReplaceResult).
        if modified_content == original_content:
            lines_changed = 0
        else:
            original_lines = len(original_content.splitlines())
            new_lines = len(modified_content.splitlines())
            lines_changed = new_lines - original_lines

        try:
            if self.config.create_backup:
                await self._backup_file(file_path)
        except Exception:
            # Best-effort backup: a failed backup never blocks the edit.
            pass

        await self._write_file(file_path, modified_content)

        return SearchReplaceResult(
            file=str(file_path),
            blocks_applied=block_result.applied,
            lines_changed=lines_changed,
            warnings=block_result.warnings,
            content=args.content,
        )

    @final
    def _prepare_and_validate_args(
        self, args: SearchReplaceArgs
    ) -> tuple[Path, list[SearchReplaceBlock]]:
        """Normalize the path, enforce size limits, and parse the blocks."""
        file_path_str = args.file_path.strip()
        content = args.content.strip()

        if not file_path_str:
            raise ToolError("File path cannot be empty")

        if len(content) > self.config.max_content_size:
            raise ToolError(
                f"Content size ({len(content)} bytes) exceeds max_content_size "
                f"({self.config.max_content_size} bytes)"
            )

        if not content:
            raise ToolError("Empty content provided")

        project_root = self.config.effective_workdir
        file_path = Path(file_path_str).expanduser()
        if not file_path.is_absolute():
            file_path = project_root / file_path
        file_path = file_path.resolve()

        if not file_path.exists():
            raise ToolError(f"File does not exist: {file_path}")

        if not file_path.is_file():
            raise ToolError(f"Path is not a file: {file_path}")

        search_replace_blocks = self._parse_search_replace_blocks(content)
        if not search_replace_blocks:
            raise ToolError(
                "No valid SEARCH/REPLACE blocks found in content.\n"
                "Expected format:\n"
                "<<<<<<< SEARCH\n"
                "[exact content to find]\n"
                "=======\n"
                "[new content to replace with]\n"
                ">>>>>>> REPLACE"
            )

        return file_path, search_replace_blocks

    async def _read_file(self, file_path: Path) -> str:
        """Read the whole file as UTF-8, translating failures to ToolError."""
        try:
            async with aiofiles.open(file_path, encoding="utf-8") as f:
                return await f.read()
        except UnicodeDecodeError as e:
            raise ToolError(f"Unicode decode error reading {file_path}: {e}") from e
        except PermissionError:
            raise ToolError(f"Permission denied reading file: {file_path}")
        except Exception as e:
            raise ToolError(f"Unexpected error reading {file_path}: {e}") from e

    async def _backup_file(self, file_path: Path) -> None:
        # copy2 preserves metadata; backup lands next to the file as "<name>.bak".
        shutil.copy2(file_path, file_path.with_suffix(file_path.suffix + ".bak"))

    async def _write_file(self, file_path: Path, content: str) -> None:
        """Write the patched content back, translating failures to ToolError."""
        try:
            async with aiofiles.open(file_path, mode="w", encoding="utf-8") as f:
                await f.write(content)
        except PermissionError:
            raise ToolError(f"Permission denied writing to file: {file_path}")
        except OSError as e:
            raise ToolError(f"OS error writing to {file_path}: {e}") from e
        except Exception as e:
            raise ToolError(f"Unexpected error writing to {file_path}: {e}") from e

    @final
    @staticmethod
    def _apply_blocks(
        content: str,
        blocks: list[SearchReplaceBlock],
        filepath: Path,
        fuzzy_threshold: float = 0.9,
    ) -> BlockApplyResult:
        """Apply blocks sequentially; each sees the output of the previous one.

        A block whose search text is missing produces a detailed error (with
        context and fuzzy-match diagnostics) but does not stop later blocks.
        Only the first occurrence of a duplicated search text is replaced.
        """
        applied = 0
        errors: list[str] = []
        warnings: list[str] = []
        current_content = content

        for i, (search, replace) in enumerate(blocks, 1):
            if search not in current_content:
                context = SearchReplace._find_search_context(current_content, search)
                fuzzy_context = SearchReplace._find_fuzzy_match_context(
                    current_content, search, fuzzy_threshold
                )

                error_msg = (
                    f"SEARCH/REPLACE block {i} failed: Search text not found in {filepath}\n"
                    f"Search text was:\n{search!r}\n"
                    f"Context analysis:\n{context}"
                )

                if fuzzy_context:
                    error_msg += f"\n{fuzzy_context}"

                error_msg += (
                    "\nDebugging tips:\n"
                    "1. Check for exact whitespace/indentation match\n"
                    "2. Verify line endings match the file exactly (\\r\\n vs \\n)\n"
                    "3. Ensure the search text hasn't been modified by previous blocks or user edits\n"
                    "4. Check for typos or case sensitivity issues"
                )

                errors.append(error_msg)
                continue

            occurrences = current_content.count(search)
            if occurrences > 1:
                warning_msg = (
                    f"Search text in block {i} appears {occurrences} times in the file. "
                    f"Only the first occurrence will be replaced. Consider making your "
                    f"search pattern more specific to avoid unintended changes."
                )
                warnings.append(warning_msg)

            current_content = current_content.replace(search, replace, 1)
            applied += 1

        return BlockApplyResult(
            content=current_content, applied=applied, errors=errors, warnings=warnings
        )

    @final
    @staticmethod
    def _find_fuzzy_match_context(
        content: str, search_text: str, threshold: float = 0.9
    ) -> str | None:
        """Build a human-readable diff against the closest fuzzy match, if any."""
        best_match = SearchReplace._find_best_fuzzy_match(
            content, search_text, threshold
        )

        if not best_match:
            return None

        diff = SearchReplace._create_unified_diff(
            search_text, best_match.text, "SEARCH", "CLOSEST MATCH"
        )

        similarity_pct = best_match.similarity * 100

        return (
            f"Closest fuzzy match (similarity {similarity_pct:.1f}%) "
            f"at lines {best_match.start_line}–{best_match.end_line}:\n"
            f"```diff\n{diff}\n```"
        )

    @final
    @staticmethod
    def _find_best_fuzzy_match(  # noqa: PLR0914
        content: str, search_text: str, threshold: float = 0.9
    ) -> FuzzyMatch | None:
        """Slide a window the size of the search text over candidate positions.

        Candidates are anchored near lines containing the first/last non-empty
        search line (± a small spread) to avoid O(file × search) comparisons;
        if no anchor hits, only the first ~100 windows are scanned as a fallback.
        """
        content_lines = content.split("\n")
        search_lines = search_text.split("\n")
        window_size = len(search_lines)

        if window_size == 0:
            return None

        non_empty_search = [line for line in search_lines if line.strip()]
        if not non_empty_search:
            return None

        first_anchor = non_empty_search[0]
        last_anchor = (
            non_empty_search[-1] if len(non_empty_search) > 1 else first_anchor
        )

        candidate_starts = set()
        spread = 5

        for i, line in enumerate(content_lines):
            if first_anchor in line or last_anchor in line:
                start_min = max(0, i - spread)
                start_max = min(len(content_lines) - window_size + 1, i + spread + 1)
                for s in range(start_min, start_max):
                    candidate_starts.add(s)

        if not candidate_starts:
            # No anchor found anywhere: cap the brute-force scan for performance.
            max_positions = min(len(content_lines) - window_size + 1, 100)
            candidate_starts = set(range(0, max_positions))

        best_match = None
        best_similarity = 0.0

        for start in candidate_starts:
            end = start + window_size
            window_text = "\n".join(content_lines[start:end])

            matcher = difflib.SequenceMatcher(None, search_text, window_text)
            similarity = matcher.ratio()

            if similarity >= threshold and similarity > best_similarity:
                best_similarity = similarity
                best_match = FuzzyMatch(
                    similarity=similarity,
                    start_line=start + 1,  # 1-based line numbers
                    end_line=end,
                    text=window_text,
                )

        return best_match

    @final
    @staticmethod
    def _create_unified_diff(
        text1: str, text2: str, label1: str = "SEARCH", label2: str = "CLOSEST MATCH"
    ) -> str:
        """Unified diff between two snippets, truncated to ~2000 chars."""
        lines1 = text1.splitlines(keepends=True)
        lines2 = text2.splitlines(keepends=True)

        # Normalize trailing newlines so the diff does not flag a missing EOL.
        lines1 = [line if line.endswith("\n") else line + "\n" for line in lines1]
        lines2 = [line if line.endswith("\n") else line + "\n" for line in lines2]

        diff = difflib.unified_diff(
            lines1, lines2, fromfile=label1, tofile=label2, lineterm="", n=3
        )

        diff_lines = list(diff)

        # Insert a visual separator after the two header lines.
        if diff_lines and not diff_lines[0].startswith("==="):
            diff_lines.insert(2, "=" * 67 + "\n")

        result = "".join(diff_lines)

        max_chars = 2000
        if len(result) > max_chars:
            result = result[:max_chars] + "\n...(diff truncated)"

        return result.rstrip()

    @final
    @staticmethod
    def _parse_search_replace_blocks(content: str) -> list[SearchReplaceBlock]:
        """Parse SEARCH/REPLACE blocks from content.

        Supports two formats:
        1. With code block fences (```...```)
        2. Without code block fences
        """
        # Fenced form is tried first; only if it matches nothing do we fall
        # back to bare blocks (avoids double-counting fenced blocks).
        matches = _BLOCK_WITH_FENCE_RE.findall(content)

        if not matches:
            matches = _BLOCK_RE.findall(content)

        return [
            SearchReplaceBlock(
                search=search.rstrip("\r\n"), replace=replace.rstrip("\r\n")
            )
            for search, replace in matches
        ]

    @final
    @staticmethod
    def _find_search_context(
        content: str, search_text: str, max_context: int = 5
    ) -> str:
        """Locate lines containing the first search line and show surroundings.

        At most the first three candidate areas are rendered, each with
        ±max_context lines and a '>>>' marker on the matching line.
        """
        lines = content.split("\n")
        search_lines = search_text.split("\n")

        if not search_lines:
            return "Search text is empty"

        first_search_line = search_lines[0].strip()
        if not first_search_line:
            return "First line of search text is empty or whitespace only"

        matches = []
        for i, line in enumerate(lines):
            if first_search_line in line:
                matches.append(i)

        if not matches:
            return f"First search line '{first_search_line}' not found anywhere in file"

        context_lines = []
        for match_idx in matches[:3]:
            start = max(0, match_idx - max_context)
            end = min(len(lines), match_idx + max_context + 1)

            context_lines.append(f"\nPotential match area around line {match_idx + 1}:")
            for i in range(start, end):
                marker = ">>>" if i == match_idx else "   "
                context_lines.append(f"{marker} {i + 1:3d}: {lines[i]}")

        return "\n".join(context_lines)


# --- vibe/core/tools/builtins/todo.py ---
from __future__ import annotations

from enum import StrEnum, auto
from typing import ClassVar

from pydantic import BaseModel, Field

from vibe.core.tools.base import (
    BaseTool,
    BaseToolConfig,
    BaseToolState,
    ToolError,
    ToolPermission,
)
from vibe.core.tools.ui import ToolCallDisplay, ToolResultDisplay, ToolUIData
ToolResultDisplay, ToolUIData +from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class TodoStatus(StrEnum): + PENDING = auto() + IN_PROGRESS = auto() + COMPLETED = auto() + CANCELLED = auto() + + +class TodoPriority(StrEnum): + LOW = auto() + MEDIUM = auto() + HIGH = auto() + + +class TodoItem(BaseModel): + id: str + content: str + status: TodoStatus = TodoStatus.PENDING + priority: TodoPriority = TodoPriority.MEDIUM + + +class TodoArgs(BaseModel): + action: str = Field(description="Either 'read' or 'write'") + todos: list[TodoItem] | None = Field( + default=None, description="Complete list of todos when writing." + ) + + +class TodoResult(BaseModel): + message: str + todos: list[TodoItem] + total_count: int + + +class TodoConfig(BaseToolConfig): + permission: ToolPermission = ToolPermission.ALWAYS + max_todos: int = 100 + + +class TodoState(BaseToolState): + todos: list[TodoItem] = Field(default_factory=list) + + +class Todo( + BaseTool[TodoArgs, TodoResult, TodoConfig, TodoState], + ToolUIData[TodoArgs, TodoResult], +): + description: ClassVar[str] = ( + "Manage todos. Use action='read' to view, action='write' with complete list to update." 
+ ) + + @classmethod + def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay: + if not isinstance(event.args, TodoArgs): + return ToolCallDisplay(summary="Invalid arguments") + + args = event.args + + match args.action: + case "read": + return ToolCallDisplay( + summary="Reading todos", details={"action": "read"} + ) + case "write": + count = len(args.todos) if args.todos else 0 + return ToolCallDisplay( + summary=f"Writing {count} todos", + details={"action": "write", "count": count}, + ) + case _: + return ToolCallDisplay( + summary=f"Unknown action: {args.action}", + details={"action": args.action}, + ) + + @classmethod + def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay: + if not isinstance(event.result, TodoResult): + return ToolResultDisplay(success=True, message="Success") + + result = event.result + + by_status = {"in_progress": [], "pending": [], "completed": [], "cancelled": []} + + for todo in result.todos: + by_status[todo.status].append({"content": todo.content, "id": todo.id}) + + return ToolResultDisplay( + success=True, + message=result.message, + details={"todos_by_status": by_status, "total_count": result.total_count}, + ) + + @classmethod + def get_status_text(cls) -> str: + return "Managing todos" + + async def run(self, args: TodoArgs) -> TodoResult: + match args.action: + case "read": + return self._read_todos() + case "write": + return self._write_todos(args.todos or []) + case _: + raise ToolError( + f"Invalid action '{args.action}'. Use 'read' or 'write'." 
+ ) + + def _read_todos(self) -> TodoResult: + return TodoResult( + message=f"Retrieved {len(self.state.todos)} todos", + todos=self.state.todos, + total_count=len(self.state.todos), + ) + + def _write_todos(self, todos: list[TodoItem]) -> TodoResult: + if len(todos) > self.config.max_todos: + raise ToolError(f"Cannot store more than {self.config.max_todos} todos") + + ids = [todo.id for todo in todos] + if len(ids) != len(set(ids)): + raise ToolError("Todo IDs must be unique") + + self.state.todos = todos + + return TodoResult( + message=f"Updated {len(todos)} todos", + todos=self.state.todos, + total_count=len(self.state.todos), + ) diff --git a/vibe/core/tools/builtins/write_file.py b/vibe/core/tools/builtins/write_file.py new file mode 100644 index 0000000..ac08fee --- /dev/null +++ b/vibe/core/tools/builtins/write_file.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +from pathlib import Path +from typing import ClassVar, final + +import aiofiles +from pydantic import BaseModel, Field + +from vibe.core.tools.base import ( + BaseTool, + BaseToolConfig, + BaseToolState, + ToolError, + ToolPermission, +) +from vibe.core.tools.ui import ToolCallDisplay, ToolResultDisplay, ToolUIData +from vibe.core.types import ToolCallEvent, ToolResultEvent + + +class WriteFileArgs(BaseModel): + path: str + content: str + overwrite: bool = Field( + default=False, description="Must be set to true to overwrite an existing file." 
class WriteFileResult(BaseModel):
    path: str
    bytes_written: int
    # True when an existing file was overwritten rather than created.
    file_existed: bool
    content: str


class WriteFileConfig(BaseToolConfig):
    # Writing is destructive, so it requires confirmation by default.
    permission: ToolPermission = ToolPermission.ASK
    max_write_bytes: int = 64_000
    create_parent_dirs: bool = True
    # Mirrors ReadFileToolConfig.max_state_history (was a hard-coded 10);
    # default unchanged, so existing configs behave identically.
    max_state_history: int = Field(
        default=10, description="Number of recently written files to remember in state."
    )


class WriteFileState(BaseToolState):
    # Bounded ring of recently written absolute paths (see max_state_history).
    recently_written_files: list[str] = Field(default_factory=list)


class WriteFile(
    BaseTool[WriteFileArgs, WriteFileResult, WriteFileConfig, WriteFileState],
    ToolUIData[WriteFileArgs, WriteFileResult],
):
    """Create or overwrite a UTF-8 file inside the project directory."""

    description: ClassVar[str] = (
        "Create or overwrite a UTF-8 file. Fails if file exists unless 'overwrite=True'."
    )

    @classmethod
    def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay:
        """Summarize the write, flagging overwrites."""
        if not isinstance(event.args, WriteFileArgs):
            return ToolCallDisplay(summary="Invalid arguments")

        args = event.args
        file_ext = Path(args.path).suffix.lstrip(".")

        return ToolCallDisplay(
            summary=f"Writing {args.path}{' (overwrite)' if args.overwrite else ''}",
            content=args.content,
            details={
                "path": args.path,
                "overwrite": args.overwrite,
                "file_extension": file_ext,
                "content": args.content,
            },
        )

    @classmethod
    def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay:
        """Report whether the file was created or overwritten."""
        if isinstance(event.result, WriteFileResult):
            action = "Overwritten" if event.result.file_existed else "Created"
            return ToolResultDisplay(
                success=True,
                message=f"{action} {Path(event.result.path).name}",
                details={
                    "bytes_written": event.result.bytes_written,
                    "path": event.result.path,
                    "content": event.result.content,
                },
            )

        return ToolResultDisplay(success=True, message="File written")

    @classmethod
    def get_status_text(cls) -> str:
        return "Writing file"

    def check_allowlist_denylist(self, args: WriteFileArgs) -> ToolPermission | None:
        """Match the target path against configured globs; denylist wins.

        Returns NEVER on a denylist hit, ALWAYS on an allowlist hit, None
        otherwise (caller falls back to the default permission).
        """
        import fnmatch

        file_path = Path(args.path).expanduser()
        if not file_path.is_absolute():
            file_path = self.config.effective_workdir / file_path
        file_str = str(file_path)

        for pattern in self.config.denylist:
            if fnmatch.fnmatch(file_str, pattern):
                return ToolPermission.NEVER

        for pattern in self.config.allowlist:
            if fnmatch.fnmatch(file_str, pattern):
                return ToolPermission.ALWAYS

        return None

    @final
    async def run(self, args: WriteFileArgs) -> WriteFileResult:
        """Validate the target, write the content, and record history."""
        file_path, file_existed, content_bytes = self._prepare_and_validate_path(args)

        await self._write_file(args, file_path)

        self._update_state_history(file_path)

        return WriteFileResult(
            path=str(file_path),
            bytes_written=content_bytes,
            file_existed=file_existed,
            content=args.content,
        )

    def _update_state_history(self, file_path: Path) -> None:
        """Append to the recently-written ring, evicting the oldest past the cap.

        Consistent with ReadFile._update_state_history; the cap now comes from
        config (max_state_history) instead of a hard-coded constant.
        """
        self.state.recently_written_files.append(str(file_path))
        if len(self.state.recently_written_files) > self.config.max_state_history:
            self.state.recently_written_files.pop(0)

    def _prepare_and_validate_path(self, args: WriteFileArgs) -> tuple[Path, bool, int]:
        """Resolve the target path and enforce size, sandbox, and overwrite rules.

        Returns:
            (resolved path, whether the file already existed, content size in bytes).
        """
        if not args.path.strip():
            raise ToolError("Path cannot be empty")

        content_bytes = len(args.content.encode("utf-8"))
        if content_bytes > self.config.max_write_bytes:
            raise ToolError(
                f"Content exceeds {self.config.max_write_bytes} bytes limit"
            )

        file_path = Path(args.path).expanduser()
        if not file_path.is_absolute():
            file_path = self.config.effective_workdir / file_path
        file_path = file_path.resolve()

        # Sandbox check: the resolved target must live under the workdir.
        try:
            file_path.relative_to(self.config.effective_workdir.resolve())
        except ValueError:
            raise ToolError(f"Cannot write outside project directory: {file_path}")

        file_existed = file_path.exists()

        if file_existed and not args.overwrite:
            raise ToolError(
                f"File '{file_path}' exists. Set overwrite=True to replace."
            )
        if self.config.create_parent_dirs:
            # Auto-create intermediate directories when configured to.
            file_path.parent.mkdir(parents=True, exist_ok=True)
        elif not file_path.parent.exists():
            raise ToolError(f"Parent directory does not exist: {file_path.parent}")

        return file_path, file_existed, content_bytes

    async def _write_file(self, args: WriteFileArgs, file_path: Path) -> None:
        """Write the content as UTF-8, translating failures to ToolError."""
        try:
            async with aiofiles.open(file_path, mode="w", encoding="utf-8") as f:
                await f.write(args.content)
        except Exception as e:
            raise ToolError(f"Error writing {file_path}: {e}") from e


# --- vibe/core/tools/manager.py ---
from __future__ import annotations

from collections.abc import Iterator
import importlib.util
import inspect
from logging import getLogger
from pathlib import Path
import re
import sys
from typing import TYPE_CHECKING, Any

from vibe import VIBE_ROOT
from vibe.core.config import get_vibe_home
from vibe.core.tools.base import BaseTool, BaseToolConfig
from vibe.core.tools.mcp import (
    RemoteTool,
    create_mcp_http_proxy_tool_class,
    create_mcp_stdio_proxy_tool_class,
    list_tools_http,
    list_tools_stdio,
)
from vibe.core.utils import run_sync

logger = getLogger("vibe")

if TYPE_CHECKING:
    from vibe.core.config import MCPHttp, MCPStdio, MCPStreamableHttp, VibeConfig


class NoSuchToolError(Exception):
    """Exception raised when a tool is not found."""


# Built-in tools ship inside the package itself.
DEFAULT_TOOL_DIR = VIBE_ROOT / "core" / "tools" / "builtins"


class ToolManager:
    """Manages tool discovery and instantiation for an Agent.

    Discovers available tools from the provided search paths. Each Agent
    should have its own ToolManager instance.
    """

    def __init__(self, config: VibeConfig) -> None:
        self._config = config
        # Lazily created tool instances, keyed by tool name (see get()).
        self._instances: dict[str, BaseTool] = {}
        self._search_paths: list[Path] = self._compute_search_paths(config)

        self._available: dict[str, type[BaseTool]] = {
            cls.get_name(): cls for cls in self._iter_tool_classes(self._search_paths)
        }
        self._integrate_mcp()

    @staticmethod
    def _compute_search_paths(config: VibeConfig) -> list[Path]:
        """Collect tool directories: builtins, config paths, nearest .vibe/tools, global.

        Order matters (earlier paths win on name clashes via dict insertion);
        duplicates are removed while preserving first occurrence.
        """
        paths: list[Path] = [DEFAULT_TOOL_DIR]

        for p in config.tool_paths:
            path = Path(p).expanduser().resolve()
            if path.is_dir():
                paths.append(path)

        # Walk up from the workdir; only the NEAREST .vibe/tools is used.
        cwd = config.effective_workdir
        for directory in (cwd, *cwd.parents):
            tools_dir = directory / ".vibe" / "tools"
            if tools_dir.is_dir():
                paths.append(tools_dir)
                break

        global_tools = get_vibe_home() / "tools"
        if global_tools.is_dir():
            paths.append(global_tools)

        unique: list[Path] = []
        seen: set[Path] = set()
        for p in paths:
            rp = p.resolve()
            if rp not in seen:
                seen.add(rp)
                unique.append(rp)
        return unique

    @staticmethod
    def _iter_tool_classes(search_paths: list[Path]) -> Iterator[type[BaseTool]]:
        """Import every non-underscore .py under the search paths and yield
        each concrete BaseTool subclass found.

        Modules are registered in sys.modules under a sanitized synthetic name;
        import failures are silently skipped so one broken plugin cannot break
        discovery.
        """
        for base in search_paths:
            if not base.is_dir():
                continue

            for path in base.rglob("*.py"):
                if not path.is_file():
                    continue
                name = path.name
                if name.startswith("_"):
                    continue

                # NOTE(review): two files with the same stem in different
                # dirs share a synthetic module name — later wins in
                # sys.modules; verify this collision is acceptable.
                stem = re.sub(r"[^0-9A-Za-z_]", "_", path.stem) or "mod"
                module_name = f"vibe_tools_discovered_{stem}"

                spec = importlib.util.spec_from_file_location(module_name, path)
                if spec is None or spec.loader is None:
                    continue
                module = importlib.util.module_from_spec(spec)
                sys.modules[module_name] = module
                try:
                    spec.loader.exec_module(module)
                except Exception:
                    continue

                for obj in vars(module).values():
                    if not inspect.isclass(obj):
                        continue
                    if not issubclass(obj, BaseTool) or obj is BaseTool:
                        continue
                    if inspect.isabstract(obj):
                        continue
                    yield obj

    @staticmethod
    def discover_tool_defaults(
        search_paths: list[Path] | None = None,
    ) -> dict[str, dict[str, Any]]:
        """Return each discoverable tool's default config as a plain dict."""
        if search_paths is None:
            search_paths = [DEFAULT_TOOL_DIR]

        defaults: dict[str, dict[str, Any]] = {}
        for cls in ToolManager._iter_tool_classes(search_paths):
            try:
                tool_name = cls.get_name()
                config_class = cls._get_tool_config_class()
                defaults[tool_name] = config_class().model_dump(exclude_none=True)
            except Exception as e:
                logger.warning(
                    "Failed to get defaults for tool %s: %s", cls.__name__, e
                )
                continue
        return defaults

    def available_tools(self) -> dict[str, type[BaseTool]]:
        # Shallow copy so callers cannot mutate the registry.
        return dict(self._available)

    def _integrate_mcp(self) -> None:
        """Register tools from configured MCP servers (no-op when none)."""
        if not self._config.mcp_servers:
            return
        run_sync(self._integrate_mcp_async())

    async def _integrate_mcp_async(self) -> None:
        """Discover and register MCP tools; failures degrade to a warning."""
        try:
            http_count = 0
            stdio_count = 0

            for srv in self._config.mcp_servers:
                match srv.transport:
                    case "http" | "streamable-http":
                        http_count += await self._register_http_server(srv)
                    case "stdio":
                        stdio_count += await self._register_stdio_server(srv)
                    case _:
                        logger.warning("Unsupported MCP transport: %r", srv.transport)

            logger.info(
                "MCP integration registered %d tools (http=%d, stdio=%d)",
                http_count + stdio_count,
                http_count,
                stdio_count,
            )
        except Exception as exc:
            logger.warning("Failed to integrate MCP tools: %s", exc)

    async def _register_http_server(self, srv: MCPHttp | MCPStreamableHttp) -> int:
        """List tools from one HTTP MCP server and register proxy classes.

        Returns the number of tools successfully registered (0 on any
        discovery failure).
        """
        url = (srv.url or "").strip()
        if not url:
            logger.warning("MCP server '%s' missing url for http transport", srv.name)
            return 0

        headers = srv.http_headers()
        try:
            tools: list[RemoteTool] = await list_tools_http(url, headers=headers)
        except Exception as exc:
            logger.warning("MCP HTTP discovery failed for %s: %s", url, exc)
            return 0

        added = 0
        for remote in tools:
            try:
                proxy_cls = create_mcp_http_proxy_tool_class(
                    url=url,
                    remote=remote,
                    alias=srv.name,
                    server_hint=srv.prompt,
                    headers=headers,
                )
                self._available[proxy_cls.get_name()] = proxy_cls
                added += 1
            except Exception as exc:
                logger.warning(
                    "Failed to register MCP HTTP tool '%s' from %s: %r",
                    getattr(remote, "name", ""),
                    url,
                    exc,
                )
        return added

    async def _register_stdio_server(self, srv: MCPStdio) -> int:
        """List tools from one stdio MCP server and register proxy classes.

        Returns the number of tools successfully registered (0 on any
        discovery failure).
        """
        cmd = srv.argv()
        if not cmd:
            logger.warning("MCP stdio server '%s' has invalid/empty command", srv.name)
            return 0

        try:
            tools: list[RemoteTool] = await list_tools_stdio(cmd)
        except Exception as exc:
            logger.warning("MCP stdio discovery failed for %r: %s", cmd, exc)
            return 0

        added = 0
        for remote in tools:
            try:
                proxy_cls = create_mcp_stdio_proxy_tool_class(
                    command=cmd, remote=remote, alias=srv.name, server_hint=srv.prompt
                )
                self._available[proxy_cls.get_name()] = proxy_cls
                added += 1
            except Exception as exc:
                logger.warning(
                    "Failed to register MCP stdio tool '%s' from %r: %r",
                    getattr(remote, "name", ""),
                    cmd,
                    exc,
                )
        return added

    def get_tool_config(self, tool_name: str) -> BaseToolConfig:
        """Merge tool defaults with user overrides (and workdir, if set).

        Unknown tool names fall back to a bare BaseToolConfig so config
        inspection never raises.
        """
        tool_class = self._available.get(tool_name)

        if tool_class:
            config_class = tool_class._get_tool_config_class()
            default_config = config_class()
        else:
            config_class = BaseToolConfig
            default_config = BaseToolConfig()

        user_overrides = self._config.tools.get(tool_name)
        if user_overrides is None:
            merged_dict = default_config.model_dump()
        else:
            # User values win over defaults on key collision.
            merged_dict = {**default_config.model_dump(), **user_overrides.model_dump()}

        if self._config.workdir is not None:
            merged_dict["workdir"] = self._config.workdir

        return config_class.model_validate(merged_dict)

    def get(self, tool_name: str) -> BaseTool:
        """Get a tool instance, creating it lazily on first call.

        Raises:
            NoSuchToolError: If the requested tool is not available.
        """
        if tool_name in self._instances:
            return self._instances[tool_name]

        if tool_name not in self._available:
            raise NoSuchToolError(
                f"Unknown tool: {tool_name}. Available: {list(self._available.keys())}"
            )

        tool_class = self._available[tool_name]
        tool_config = self.get_tool_config(tool_name)
        self._instances[tool_name] = tool_class.from_config(tool_config)
        return self._instances[tool_name]

    def reset_all(self) -> None:
        """Drop all cached instances; they will be rebuilt on next get()."""
        self._instances.clear()


# --- vibe/core/tools/mcp.py ---
from __future__ import annotations

import hashlib
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar

from mcp import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client
from mcp.client.streamable_http import streamablehttp_client
from pydantic import BaseModel, ConfigDict, Field, field_validator

from vibe.core.tools.base import BaseTool, BaseToolConfig, BaseToolState, ToolError
from vibe.core.tools.ui import ToolCallDisplay, ToolResultDisplay

if TYPE_CHECKING:
    from vibe.core.types import ToolCallEvent, ToolResultEvent


class _OpenArgs(BaseModel):
    # Accepts arbitrary fields: the remote tool's schema is not known statically.
    model_config = ConfigDict(extra="allow")


class MCPToolResult(BaseModel):
    ok: bool = True
    server: str
    tool: str
    # Exactly one of text/structured is typically populated (see _parse_call_result).
    text: str | None = None
    structured: dict[str, Any] | None = None


class RemoteTool(BaseModel):
    """A tool advertised by an MCP server, normalized from its wire form."""

    model_config = ConfigDict(from_attributes=True)

    name: str
    description: str | None = None
    input_schema: dict[str, Any] = Field(
        default_factory=lambda: {"type": "object", "properties": {}},
        validation_alias="inputSchema",
    )

    @field_validator("name")
    @classmethod
    def _non_empty_name(cls, v: str) -> str:
        if not isinstance(v, str) or not v.strip():
            raise ValueError("MCP tool missing valid 'name'")
        return v

    @field_validator("input_schema", mode="before")
    @classmethod
    def _normalize_schema(cls, v: Any) -> dict[str, Any]:
        # Accept dicts, pydantic models, or None; anything else degrades to
        # an empty object schema rather than failing validation.
        if v is None:
            return {"type": "object", "properties": {}}
        if isinstance(v, dict):
            return v
        dump = getattr(v,
"model_dump", None) + if callable(dump): + try: + v = dump() + except Exception: + return {"type": "object", "properties": {}} + return v if isinstance(v, dict) else {"type": "object", "properties": {}} + + +class _MCPContentBlock(BaseModel): + model_config = ConfigDict(from_attributes=True) + text: str | None = None + + +class _MCPResultIn(BaseModel): + model_config = ConfigDict(from_attributes=True) + + structuredContent: dict[str, Any] | None = None + content: list[_MCPContentBlock] | None = None + + @field_validator("structuredContent", mode="before") + @classmethod + def _normalize_structured(cls, v: Any) -> dict[str, Any] | None: + if v is None: + return None + if isinstance(v, dict): + return v + dump = getattr(v, "model_dump", None) + if callable(dump): + try: + v = dump() + except Exception: + return None + return v if isinstance(v, dict) else None + + +def _parse_call_result(server: str, tool: str, result_obj: Any) -> MCPToolResult: + parsed = _MCPResultIn.model_validate(result_obj) + if (structured := parsed.structuredContent) is not None: + return MCPToolResult(server=server, tool=tool, text=None, structured=structured) + + blocks = parsed.content or [] + parts = [b.text for b in blocks if isinstance(b.text, str)] + text = "\n".join(parts) if parts else None + return MCPToolResult(server=server, tool=tool, text=text, structured=None) + + +async def list_tools_http( + url: str, headers: dict[str, str] | None = None +) -> list[RemoteTool]: + async with streamablehttp_client(url, headers=headers) as (read, write, _): + async with ClientSession(read, write) as session: + await session.initialize() + tools_resp = await session.list_tools() + return [RemoteTool.model_validate(t) for t in tools_resp.tools] + + +async def call_tool_http( + url: str, + tool_name: str, + arguments: dict[str, Any], + *, + headers: dict[str, str] | None = None, +) -> MCPToolResult: + async with streamablehttp_client(url, headers=headers) as (read, write, _): + async with 
def create_mcp_http_proxy_tool_class(
    *,
    url: str,
    remote: RemoteTool,
    alias: str | None = None,
    server_hint: str | None = None,
    headers: dict[str, str] | None = None,
) -> type[BaseTool[_OpenArgs, MCPToolResult, BaseToolConfig, BaseToolState]]:
    """Build a BaseTool subclass that proxies calls to one remote MCP HTTP tool.

    The returned class is registered like any local tool; its published name is
    ``<alias-or-host>_<remote tool name>`` and each ``run`` opens a fresh
    streamable-HTTP session via ``call_tool_http``.

    Args:
        url: Streamable-HTTP endpoint of the MCP server.
        remote: Discovered tool description (name, description, input schema).
        alias: Optional short server name used as the published-name prefix.
        server_hint: Optional extra text appended to the tool description.
        headers: Optional HTTP headers forwarded on every call.
    """
    from urllib.parse import urlparse

    def _alias_from_url(url: str) -> str:
        # Fallback prefix when no alias is given: host (dots -> underscores) + port.
        p = urlparse(url)
        host = (p.hostname or "mcp").replace(".", "_")
        port = f"_{p.port}" if p.port else ""
        return f"{host}{port}"

    published_name = f"{(alias or _alias_from_url(url))}_{remote.name}"

    class MCPHttpProxyTool(
        BaseTool[_OpenArgs, MCPToolResult, BaseToolConfig, BaseToolState]
    ):
        """Proxy tool closing over the server URL and remote tool metadata."""

        description: ClassVar[str] = (
            (f"[{alias}] " if alias else "")
            + (remote.description or f"MCP tool '{remote.name}' from {url}")
            + (f"\nHint: {server_hint}" if server_hint else "")
        )
        _mcp_url: ClassVar[str] = url
        _remote_name: ClassVar[str] = remote.name
        _input_schema: ClassVar[dict[str, Any]] = remote.input_schema
        _headers: ClassVar[dict[str, str]] = dict(headers or {})

        @classmethod
        def get_name(cls) -> str:
            return published_name

        @classmethod
        def get_parameters(cls) -> dict[str, Any]:
            # Copy so callers cannot mutate the cached schema.
            return dict(cls._input_schema)

        async def run(self, args: _OpenArgs) -> MCPToolResult:
            """Forward the (extra="allow") arguments to the remote tool."""
            try:
                payload = args.model_dump(exclude_none=True)
                return await call_tool_http(
                    self._mcp_url, self._remote_name, payload, headers=self._headers
                )
            except Exception as exc:
                raise ToolError(f"MCP call failed: {exc}") from exc

        @classmethod
        def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay:
            return ToolCallDisplay(
                summary=f"{published_name}",
                details=event.args.model_dump()
                if hasattr(event.args, "model_dump")
                else {},
            )

        @classmethod
        def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay:
            if not isinstance(event.result, MCPToolResult):
                # Errors and skips carry no MCPToolResult payload.
                return ToolResultDisplay(
                    success=False,
                    message=event.error or event.skip_reason or "No result",
                )

            message = f"MCP tool {event.result.tool} completed"
            details = {}
            if event.result.text:
                details["text"] = event.result.text
            if event.result.structured:
                details["structured"] = event.result.structured

            return ToolResultDisplay(
                success=event.result.ok, message=message, details=details
            )

        @classmethod
        def get_status_text(cls) -> str:
            return f"Calling MCP tool {remote.name}"

    # Give each generated class a distinctive __name__ for logs/debugging.
    MCPHttpProxyTool.__name__ = f"MCP_{(alias or _alias_from_url(url))}__{remote.name}"
    return MCPHttpProxyTool
    class MCPStdioProxyTool(
        BaseTool[_OpenArgs, MCPToolResult, BaseToolConfig, BaseToolState]
    ):
        """Proxy tool closing over the stdio command and remote tool metadata.

        Each ``run`` spawns the server process anew via ``call_tool_stdio``.
        """

        description: ClassVar[str] = (
            (f"[{computed_alias}] " if computed_alias else "")
            + (
                remote.description
                or f"MCP tool '{remote.name}' from stdio command: {' '.join(command)}"
            )
            + (f"\nHint: {server_hint}" if server_hint else "")
        )
        _stdio_command: ClassVar[list[str]] = command
        _remote_name: ClassVar[str] = remote.name
        _input_schema: ClassVar[dict[str, Any]] = remote.input_schema

        @classmethod
        def get_name(cls) -> str:
            return published_name

        @classmethod
        def get_parameters(cls) -> dict[str, Any]:
            # Copy so callers cannot mutate the cached schema.
            return dict(cls._input_schema)

        async def run(self, args: _OpenArgs) -> MCPToolResult:
            """Forward the (extra="allow") arguments to the remote tool."""
            try:
                payload = args.model_dump(exclude_none=True)
                result = await call_tool_stdio(
                    self._stdio_command, self._remote_name, payload
                )
                return result
            except Exception as exc:
                raise ToolError(f"MCP stdio call failed: {exc!r}") from exc

        @classmethod
        def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay:
            return ToolCallDisplay(
                summary=f"{published_name}",
                details=event.args.model_dump()
                if hasattr(event.args, "model_dump")
                else {},
            )

        @classmethod
        def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay:
            if not isinstance(event.result, MCPToolResult):
                # Errors and skips carry no MCPToolResult payload.
                return ToolResultDisplay(
                    success=False,
                    message=event.error or event.skip_reason or "No result",
                )

            message = f"MCP tool {event.result.tool} completed"
            details = {}
            if event.result.text:
                details["text"] = event.result.text
            if event.result.structured:
                details["structured"] = event.result.structured

            return ToolResultDisplay(
                success=event.result.ok, message=message, details=details
            )

        @classmethod
        def get_status_text(cls) -> str:
            return f"Calling MCP tool {remote.name}"
@runtime_checkable
class ToolUIData[TArgs: BaseModel, TResult: BaseModel](Protocol):
    """Structural protocol for tool classes that supply their own UI rendering.

    Checked via ``issubclass`` (runtime_checkable) by ToolUIDataAdapter; tools
    that implement these three classmethods get custom call/result/status
    display instead of the generic fallback.
    """

    @classmethod
    def get_call_display(cls, event: ToolCallEvent) -> ToolCallDisplay: ...

    @classmethod
    def get_result_display(cls, event: ToolResultEvent) -> ToolResultDisplay: ...

    @classmethod
    def get_status_text(cls) -> str: ...
class ToolUIDataAdapter:
    """Render call/result/status display for a tool class.

    Delegates to the tool class when it implements the ToolUIData protocol;
    otherwise synthesizes a generic display from the event payload.
    """

    def __init__(self, tool_class: Any) -> None:
        self.tool_class = tool_class
        # Keep the class itself as the UI provider only if it satisfies the protocol.
        provider: type[ToolUIData[Any, Any]] | None = None
        if issubclass(tool_class, ToolUIData):
            provider = tool_class
        self.ui_data_class = provider

    def get_call_display(self, event: ToolCallEvent) -> ToolCallDisplay:
        """Describe a pending tool call; fall back to "name(arg=..., ...)"."""
        provider = self.ui_data_class
        if provider:
            return provider.get_call_display(event)

        args_dict = event.args.model_dump() if hasattr(event.args, "model_dump") else {}
        # Show at most three arguments in the summary to keep it short.
        preview = list(args_dict.items())[:3]
        rendered = ", ".join(f"{key}={val!r}" for key, val in preview)
        return ToolCallDisplay(
            summary=f"{event.tool_name}({rendered})", details=args_dict
        )

    def get_result_display(self, event: ToolResultEvent) -> ToolResultDisplay:
        """Describe a finished tool call; errors and skips take precedence."""
        if event.error:
            return ToolResultDisplay(success=False, message=event.error)

        if event.skipped:
            return ToolResultDisplay(
                success=False, message=event.skip_reason or "Skipped"
            )

        provider = self.ui_data_class
        if provider:
            return provider.get_result_display(event)

        details: dict[str, Any] = {}
        if event.result and hasattr(event.result, "model_dump"):
            details = event.result.model_dump()
        return ToolResultDisplay(success=True, message="Success", details=details)

    def get_status_text(self) -> str:
        """One-line 'in progress' text for the UI."""
        provider = self.ui_data_class
        if provider:
            return provider.get_status_text()

        tool_name = getattr(self.tool_class, "get_name", lambda: "tool")()
        return f"Running {tool_name}"
"resume"] + session_id: str + session_time: str + + def message(self) -> str: + action = None + match self.type: + case "continue": + action = "Continuing" + case "resume": + action = "Resuming" + return f"{action} session `{self.session_id}` from {self.session_time}" + + +class AgentStats(BaseModel): + steps: int = 0 + session_prompt_tokens: int = 0 + session_completion_tokens: int = 0 + tool_calls_agreed: int = 0 + tool_calls_rejected: int = 0 + tool_calls_failed: int = 0 + tool_calls_succeeded: int = 0 + + context_tokens: int = 0 + + last_turn_prompt_tokens: int = 0 + last_turn_completion_tokens: int = 0 + last_turn_duration: float = 0.0 + tokens_per_second: float = 0.0 + + input_price_per_million: float = 0.0 + output_price_per_million: float = 0.0 + + @computed_field + @property + def session_total_llm_tokens(self) -> int: + return self.session_prompt_tokens + self.session_completion_tokens + + @computed_field + @property + def last_turn_total_tokens(self) -> int: + return self.last_turn_prompt_tokens + self.last_turn_completion_tokens + + @computed_field + @property + def session_cost(self) -> float: + """Calculate the total session cost in dollars based on token usage and pricing. + + NOTE: This is a rough estimate and is worst-case scenario. + The actual cost may be lower due to prompt caching. + If the model changes mid-session, this uses current pricing for all tokens. + """ + input_cost = ( + self.session_prompt_tokens / 1_000_000 + ) * self.input_price_per_million + output_cost = ( + self.session_completion_tokens / 1_000_000 + ) * self.output_price_per_million + return input_cost + output_cost + + def update_pricing(self, input_price: float, output_price: float) -> None: + """Update pricing info when model changes. + + NOTE: session_cost will be recalculated using new pricing for all + accumulated tokens. This is a known approximation when models change. 
+ This should not be a big issue, pricing is only used for max_price which is in + programmatic mode, so user should not update models there. + """ + self.input_price_per_million = input_price + self.output_price_per_million = output_price + + def reset_context_state(self) -> None: + """Reset context-related fields while preserving cumulative session stats. + + Used after config reload or similar operations where the context + changes but we want to preserve session totals. + """ + self.context_tokens = 0 + self.last_turn_prompt_tokens = 0 + self.last_turn_completion_tokens = 0 + self.last_turn_duration = 0.0 + self.tokens_per_second = 0.0 + + +class SessionInfo(BaseModel): + session_id: str + start_time: str + message_count: int + stats: AgentStats + save_dir: str + + +class SessionMetadata(BaseModel): + session_id: str + start_time: str + end_time: str | None + git_commit: str | None + git_branch: str | None + environment: dict[str, str | None] + auto_approve: bool = False + username: str + + +StrToolChoice = Literal["auto", "none", "any", "required"] + + +class AvailableFunction(BaseModel): + name: str + description: str + parameters: dict[str, Any] + + +class AvailableTool(BaseModel): + type: Literal["function"] = "function" + function: AvailableFunction + + +class FunctionCall(BaseModel): + name: str | None = None + arguments: str | None = None + + +class ToolCall(BaseModel): + id: str | None = None + index: int | None = None + function: FunctionCall = Field(default_factory=FunctionCall) + type: str = "function" + + +def _content_before(v: Any) -> str: + if isinstance(v, str): + return v + if isinstance(v, list): + parts: list[str] = [] + for p in v: + if isinstance(p, dict) and isinstance(p.get("text"), str): + parts.append(p["text"]) + else: + parts.append(str(p)) + return "\n".join(parts) + return str(v) + + +Content = Annotated[str, BeforeValidator(_content_before)] + + +class Role(StrEnum): + system = auto() + user = auto() + assistant = auto() + tool = 
class LLMMessage(BaseModel):
    """Chat message in OpenAI-style wire format.

    Accepts either a plain dict or any object exposing role/content/
    tool_calls/name/tool_call_id attributes (e.g. an SDK response message);
    normalization happens in ``_from_any``.
    """

    model_config = ConfigDict(extra="ignore")

    role: Role
    content: Content | None = None
    tool_calls: list[ToolCall] | None = None
    name: str | None = None
    tool_call_id: str | None = None

    @model_validator(mode="before")
    @classmethod
    def _from_any(cls, v: Any) -> dict[str, Any] | Any:
        """Normalize dicts and attribute-bearing objects into a message dict.

        Returns a fresh dict so the caller's input is never mutated; the
        previous implementation ``setdefault``-ed defaults into the incoming
        dict in place, silently altering caller-owned data.
        """
        if isinstance(v, dict):
            # Defaults first, caller-provided keys win — same effect as
            # setdefault("content", "") / setdefault("role", "assistant"),
            # but without mutating v.
            return {"role": "assistant", "content": "", **v}
        return {
            "role": str(getattr(v, "role", "assistant")),
            "content": getattr(v, "content", ""),
            "tool_calls": getattr(v, "tool_calls", None),
            "name": getattr(v, "name", None),
            "tool_call_id": getattr(v, "tool_call_id", None),
        }
class ApprovalResponse(StrEnum):
    """Single-letter answers a user can give to a tool-approval prompt."""

    YES = "y"  # approve this tool call
    NO = "n"  # reject this tool call
    ALWAYS = "a"  # approve now and presumably auto-approve subsequent calls — confirm with approval flow
def get_user_cancellation_message(
    cancellation_reason: CancellationReason, tool_name: str | None = None
) -> TaggedText:
    """Map a cancellation reason to the tagged message recorded for the model.

    For TOOL_SKIPPED, *tool_name* (when given) is used verbatim as the
    message; every message carries the user-cancellation tag.
    """
    reasons = CancellationReason
    if cancellation_reason is reasons.OPERATION_CANCELLED:
        return TaggedText("User cancelled the operation.", CANCELLATION_TAG)
    if cancellation_reason is reasons.TOOL_INTERRUPTED:
        return TaggedText("Tool execution interrupted by user.", CANCELLATION_TAG)
    if cancellation_reason is reasons.TOOL_NO_RESPONSE:
        return TaggedText(
            "Tool execution interrupted - no response available", CANCELLATION_TAG
        )
    if cancellation_reason is reasons.TOOL_SKIPPED:
        return TaggedText(
            tool_name or "Tool execution skipped by user.", CANCELLATION_TAG
        )
+ + Args: + path: Path to check (defaults to current directory) + + Returns: + tuple[bool, str]: (is_dangerous, reason) where reason explains why it's dangerous + """ + path = Path(path).resolve() + + home_dir = Path.home() + + dangerous_paths = { + home_dir: "home directory", + home_dir / "Documents": "Documents folder", + home_dir / "Desktop": "Desktop folder", + home_dir / "Downloads": "Downloads folder", + home_dir / "Pictures": "Pictures folder", + home_dir / "Movies": "Movies folder", + home_dir / "Music": "Music folder", + home_dir / "Library": "Library folder", + Path("/Applications"): "Applications folder", + Path("/System"): "System folder", + Path("/Library"): "System Library folder", + Path("/usr"): "System usr folder", + Path("/private"): "System private folder", + } + + for dangerous_path, description in dangerous_paths.items(): + try: + if path == dangerous_path: + return True, f"You are in the {description}" + except (OSError, ValueError): + continue + return False, "" + + +LOG_DIR = CONFIG_DIR +LOG_DIR.mkdir(parents=True, exist_ok=True) +LOG_FILE = LOG_DIR / "vibe.log" + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(levelname)s %(message)s", + handlers=[logging.FileHandler(LOG_FILE, "a", "utf-8")], +) +logger = logging.getLogger("vibe") +logger.info("Using config: %s", CONFIG_FILE) +if CONFIG_FILE != GLOBAL_CONFIG_FILE and GLOBAL_CONFIG_FILE.is_file(): + logger.warning( + "Project config active (%s); ignoring global config (%s)", + CONFIG_FILE, + GLOBAL_CONFIG_FILE, + ) + + +def get_user_agent() -> str: + return f"Mistral-Vibe/{__version__}" + + +def _is_retryable_http_error(e: Exception) -> bool: + if isinstance(e, httpx.HTTPStatusError): + return e.response.status_code in {408, 409, 425, 429, 500, 502, 503, 504} + return False + + +def async_retry[T, **P]( + tries: int = 3, + delay_seconds: float = 0.5, + backoff_factor: float = 2.0, + is_retryable: Callable[[Exception], bool] = _is_retryable_http_error, +) -> 
Callable[[Callable[P, Awaitable[T]]], Callable[P, Awaitable[T]]]: + """Args: + tries: Number of retry attempts + delay_seconds: Initial delay between retries in seconds + backoff_factor: Multiplier for delay on each retry + is_retryable: Function to determine if an exception should trigger a retry + (defaults to checking for retryable HTTP errors from both urllib and httpx) + + Returns: + Decorated function with retry logic + """ + + def decorator(func: Callable[P, Awaitable[T]]) -> Callable[P, Awaitable[T]]: + @functools.wraps(func) + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: + last_exc = None + for attempt in range(tries): + try: + return await func(*args, **kwargs) + except Exception as e: + last_exc = e + if attempt < tries - 1 and is_retryable(e): + current_delay = (delay_seconds * (backoff_factor**attempt)) + ( + 0.05 * attempt + ) + await asyncio.sleep(current_delay) + continue + raise e + raise RuntimeError( + f"Retries exhausted. Last error: {last_exc}" + ) from last_exc + + return wrapper + + return decorator + + +def async_generator_retry[T, **P]( + tries: int = 3, + delay_seconds: float = 0.5, + backoff_factor: float = 2.0, + is_retryable: Callable[[Exception], bool] = _is_retryable_http_error, +) -> Callable[[Callable[P, AsyncGenerator[T]]], Callable[P, AsyncGenerator[T]]]: + """Retry decorator for async generators. 
+ + Args: + tries: Number of retry attempts + delay_seconds: Initial delay between retries in seconds + backoff_factor: Multiplier for delay on each retry + is_retryable: Function to determine if an exception should trigger a retry + (defaults to checking for retryable HTTP errors from both urllib and httpx) + + Returns: + Decorated async generator function with retry logic + """ + + def decorator( + func: Callable[P, AsyncGenerator[T]], + ) -> Callable[P, AsyncGenerator[T]]: + @functools.wraps(func) + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> AsyncGenerator[T]: + last_exc = None + for attempt in range(tries): + try: + async for item in func(*args, **kwargs): + yield item + return + except Exception as e: + last_exc = e + if attempt < tries - 1 and is_retryable(e): + current_delay = (delay_seconds * (backoff_factor**attempt)) + ( + 0.05 * attempt + ) + await asyncio.sleep(current_delay) + continue + raise e + raise RuntimeError( + f"Retries exhausted. Last error: {last_exc}" + ) from last_exc + + return wrapper + + return decorator + + +class ConversationLimitException(Exception): + pass + + +def run_sync[T](coro: Coroutine[Any, Any, T]) -> T: + """Run an async coroutine synchronously, handling nested event loops. + + If called from within an async context (running event loop), runs the + coroutine in a thread pool executor. Otherwise, uses asyncio.run(). + + This mirrors the pattern used by ToolManager for MCP integration. 
+ """ + try: + asyncio.get_running_loop() + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, coro) + return future.result() + except RuntimeError: + return asyncio.run(coro) + + +def is_windows() -> bool: + return sys.platform == "win32" diff --git a/vibe/setup/onboarding/__init__.py b/vibe/setup/onboarding/__init__.py new file mode 100644 index 0000000..b92aee2 --- /dev/null +++ b/vibe/setup/onboarding/__init__.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import sys + +from rich import print as rprint +from textual.app import App + +from vibe.core.config import GLOBAL_ENV_FILE +from vibe.setup.onboarding.screens import ( + ApiKeyScreen, + ThemeSelectionScreen, + WelcomeScreen, +) + + +class OnboardingApp(App[str | None]): + CSS_PATH = "onboarding.tcss" + + def on_mount(self) -> None: + self.install_screen(WelcomeScreen(), "welcome") + self.install_screen(ThemeSelectionScreen(), "theme_selection") + self.install_screen(ApiKeyScreen(), "api_key") + self.push_screen("welcome") + + +def run_onboarding(app: App | None = None) -> None: + result = (app or OnboardingApp()).run() + match result: + case None: + rprint("\n[yellow]Setup cancelled. See you next time![/]") + sys.exit(0) + case str() as s if s.startswith("save_error:"): + err = s.removeprefix("save_error:") + rprint( + f"\n[yellow]Warning: Could not save API key to .env file: {err}[/]" + "\n[dim]The API key is set for this session only. 
" + f"You may need to set it manually in {GLOBAL_ENV_FILE}[/]\n" + ) + case "completed": + pass diff --git a/vibe/setup/onboarding/base.py b/vibe/setup/onboarding/base.py new file mode 100644 index 0000000..bea16b1 --- /dev/null +++ b/vibe/setup/onboarding/base.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from textual.screen import Screen + + +class OnboardingScreen(Screen[str | None]): + NEXT_SCREEN: str | None = None + + def action_next(self) -> None: + if self.NEXT_SCREEN: + self.app.switch_screen(self.NEXT_SCREEN) + + def action_cancel(self) -> None: + self.app.exit(None) diff --git a/vibe/setup/onboarding/onboarding.tcss b/vibe/setup/onboarding/onboarding.tcss new file mode 100644 index 0000000..ec616bd --- /dev/null +++ b/vibe/setup/onboarding/onboarding.tcss @@ -0,0 +1,230 @@ +/* ============================================================================= + Onboarding App Styles + ============================================================================= */ + +Screen { + align: center middle; +} + +OnboardingScreen { + align: center middle; +} + +/* ============================================================================= + Welcome Screen + ============================================================================= */ + +#welcome-container { + align: center middle; +} + +#welcome-text { + border: round #555555; + padding: 1 3; + margin-bottom: 2; + text-align: center; + width: auto; +} + +WelcomeScreen #enter-hint { + color: $text-muted; + min-width: 16; + text-align: center; +} + +WelcomeScreen #enter-hint.hidden { + visibility: hidden; +} + +/* ============================================================================= + Theme Selection Screen + ============================================================================= */ + +#theme-outer { + width: 100%; + height: 100%; + align: center middle; + overflow-y: auto; +} + +#theme-content { + width: auto; + height: auto; + padding: 1 0; +} + +#theme-title { + text-align: center; + 
width: 100%; + margin-bottom: 2; +} + +#theme-row { + width: auto; + height: auto; +} + +#nav-hint { + color: $text-muted; + width: 16; + height: 100%; + content-align: right middle; + padding-right: 2; +} + +#theme-list { + width: 24; + height: auto; +} + +.theme-item { + text-align: center; + width: 100%; +} + +.theme-item.selected { + color: $text; + text-style: bold; + border: round #555555; + padding: 0 2; +} + +.theme-item.fade-1 { + text-opacity: 50%; +} + +.theme-item.fade-2 { + text-opacity: 25%; +} + +.theme-item.fade-3 { + text-opacity: 10%; +} + +ThemeSelectionScreen #enter-hint { + color: $text-muted; + width: 16; + height: 100%; + content-align: left middle; + padding-left: 2; +} + +#preview-center { + width: 100%; + height: auto; + margin-top: 2; + align: center middle; +} + +#preview { + min-width: 50; + max-width: 70; + height: auto; + max-height: 100%; + overflow: auto; + border: round #555555; + border-title-align: center; +} + +#preview-inner { + width: 100%; + height: auto; + padding: 0 1 0 1; +} + +/* ============================================================================= + API Key Screen + ============================================================================= */ + +#api-key-outer { + overflow-y: auto; +} + +.spacer { + height: 1fr; +} + +#api-key-title { + text-align: center; + margin-bottom: 2; +} + +#api-key-content { + width: auto; + height: auto; +} + +#api-key-content Static { + text-align: center; +} + +.link-row { + width: auto; + height: auto; + margin-top: 1; +} + +.link-chevron { + width: auto; +} + +#input-box { + border: round #555555; + padding: 0 1; + margin-top: 2; + width: auto; + height: 3; +} + +#input-box.valid { + border: round $success; +} + +#input-box.invalid { + border: round $error; +} + +#key { + border: none; + width: 48; + height: 1; + padding: 0; +} + +#paste-hint { + margin-top: 3; +} + +#feedback { + text-align: center; + height: 1; + margin-top: 1; +} + +#feedback.error { + color: $error; +} + 
+#feedback.success { + color: $text-muted; +} + +#config-docs-section { + width: 100%; + height: auto; + align: center top; + padding-bottom: 2; +} + +#config-docs-group { + width: auto; + height: auto; +} + +#config-docs-group Static { + color: $text-muted; +} + +#config-docs-group .link-row { + margin-top: 0; +} diff --git a/vibe/setup/onboarding/screens/__init__.py b/vibe/setup/onboarding/screens/__init__.py new file mode 100644 index 0000000..76113dd --- /dev/null +++ b/vibe/setup/onboarding/screens/__init__.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +from vibe.setup.onboarding.screens.api_key import ApiKeyScreen +from vibe.setup.onboarding.screens.theme_selection import ThemeSelectionScreen +from vibe.setup.onboarding.screens.welcome import WelcomeScreen + +__all__ = ["ApiKeyScreen", "ThemeSelectionScreen", "WelcomeScreen"] diff --git a/vibe/setup/onboarding/screens/api_key.py b/vibe/setup/onboarding/screens/api_key.py new file mode 100644 index 0000000..65df1bb --- /dev/null +++ b/vibe/setup/onboarding/screens/api_key.py @@ -0,0 +1,133 @@ +from __future__ import annotations + +import os +from typing import ClassVar + +from dotenv import set_key +from textual.app import ComposeResult +from textual.binding import Binding, BindingType +from textual.containers import Center, Horizontal, Vertical +from textual.events import MouseUp +from textual.validation import Length +from textual.widgets import Input, Link, Static + +from vibe.cli.clipboard import copy_selection_to_clipboard +from vibe.core.config import GLOBAL_ENV_FILE, VibeConfig +from vibe.setup.onboarding.base import OnboardingScreen + +PROVIDER_HELP = { + "mistral": ("https://console.mistral.ai/codestral/vibe", "Mistral AI Studio") +} +CONFIG_DOCS_URL = ( + "https://github.com/mistralai/mistral-vibe?tab=readme-ov-file#configuration" +) + + +def _save_api_key_to_env_file(env_key: str, api_key: str) -> None: + GLOBAL_ENV_FILE.parent.mkdir(parents=True, exist_ok=True) + set_key(GLOBAL_ENV_FILE, 
env_key, api_key)
+
+
+class ApiKeyScreen(OnboardingScreen):
+    """Final onboarding screen: collect the provider API key and persist it.
+
+    On a valid submit the key is exported to the current process environment
+    and written to the global .env file, then the app exits with a result
+    string describing the outcome.
+    """
+
+    BINDINGS: ClassVar[list[BindingType]] = [
+        Binding("ctrl+c", "cancel", "Cancel", show=False),
+        Binding("escape", "cancel", "Cancel", show=False),
+    ]
+
+    # Last screen in the flow — there is no screen after this one.
+    NEXT_SCREEN = None
+
+    def __init__(self) -> None:
+        super().__init__()
+        # model_construct() builds the config without validation — presumably
+        # so onboarding works before a complete config exists; TODO confirm.
+        config = VibeConfig.model_construct()
+        active_model = config.get_active_model()
+        self.provider = config.get_provider_for_model(active_model)
+
+    def _compose_provider_link(self, provider_name: str) -> ComposeResult:
+        """Yield a "grab your key here" link for providers listed in PROVIDER_HELP."""
+        # Bare return in a generator: unknown providers simply get no link row.
+        if self.provider.name not in PROVIDER_HELP:
+            return
+
+        help_url, help_name = PROVIDER_HELP[self.provider.name]
+        yield Static(f"Grab your {provider_name} API key from the {help_name}:")
+        yield Center(
+            Horizontal(
+                Static("→ ", classes="link-chevron"),
+                Link(help_url, url=help_url),
+                classes="link-row",
+            )
+        )
+
+    def _compose_config_docs(self) -> ComposeResult:
+        """Yield the footer row linking to the configuration documentation."""
+        yield Static("[dim]Learn more about Vibe configuration:[/]")
+        yield Horizontal(
+            Static("→ ", classes="link-chevron"),
+            Link(CONFIG_DOCS_URL, url=CONFIG_DOCS_URL),
+            classes="link-row",
+        )
+
+    def compose(self) -> ComposeResult:
+        """Build the layout: title, optional provider link, key input, docs footer."""
+        provider_name = self.provider.name.capitalize()
+
+        # Password-style input; the Length validator only requires a non-empty key.
+        self.input_widget = Input(
+            password=True,
+            id="key",
+            placeholder="Paste your API key here",
+            validators=[Length(minimum=1, failure_description="No API key provided.")],
+        )
+
+        with Vertical(id="api-key-outer"):
+            yield Static("", classes="spacer")
+            yield Center(Static("One last thing...", id="api-key-title"))
+            with Center():
+                with Vertical(id="api-key-content"):
+                    yield from self._compose_provider_link(provider_name)
+                    yield Static(
+                        "...and paste it below to finish the setup:", id="paste-hint"
+                    )
+                    yield Center(Horizontal(self.input_widget, id="input-box"))
+                    yield Static("", id="feedback")
+            yield Static("", classes="spacer")
+            yield Vertical(
+                Vertical(*self._compose_config_docs(), id="config-docs-group"),
+                id="config-docs-section",
+            )
+
+    def on_mount(self) -> None:
+        # Focus the input so the user can paste their key immediately.
+        self.input_widget.focus()
+
+    def on_input_changed(self, event: Input.Changed) -> None:
+        """Mirror the input's validation state in the feedback line and box classes."""
+        feedback = self.query_one("#feedback", Static)
+        input_box = self.query_one("#input-box")
+
+        if event.validation_result is None:
+            return
+
+        # Reset both widgets before applying the current state's classes.
+        input_box.remove_class("valid", "invalid")
+        feedback.remove_class("error", "success")
+
+        if event.validation_result.is_valid:
+            feedback.update("Press Enter to submit ↵")
+            feedback.add_class("success")
+            input_box.add_class("valid")
+            return
+
+        # Show only the first failure message.
+        descriptions = event.validation_result.failure_descriptions
+        feedback.update(descriptions[0])
+        feedback.add_class("error")
+        input_box.add_class("invalid")
+
+    def on_input_submitted(self, event: Input.Submitted) -> None:
+        # Only persist when the validator accepted the value.
+        if event.validation_result and event.validation_result.is_valid:
+            self._save_and_finish(event.value)
+
+    def _save_and_finish(self, api_key: str) -> None:
+        """Export the key to the environment, persist it, and exit the app.
+
+        NOTE(review): the exit result strings ("completed" / "save_error:...")
+        look like a protocol consumed by the onboarding runner — confirm.
+        """
+        env_key = self.provider.api_key_env_var
+        # Make the key visible to the current process right away.
+        os.environ[env_key] = api_key
+        try:
+            _save_api_key_to_env_file(env_key, api_key)
+        except OSError as err:
+            self.app.exit(f"save_error:{err}")
+            return
+        self.app.exit("completed")
+
+    def on_mouse_up(self, event: MouseUp) -> None:
+        # Releasing the mouse copies any current text selection to the clipboard.
+        copy_selection_to_clipboard(self.app)
diff --git a/vibe/setup/onboarding/screens/theme_selection.py b/vibe/setup/onboarding/screens/theme_selection.py
new file mode 100644
index 0000000..e9379e5
--- /dev/null
+++ b/vibe/setup/onboarding/screens/theme_selection.py
@@ -0,0 +1,140 @@
+from __future__ import annotations
+
+from typing import ClassVar
+
+from textual.app import ComposeResult
+from textual.binding import Binding, BindingType
+from textual.containers import Center, Container, Horizontal, Vertical
+from textual.events import Resize
+from textual.theme import BUILTIN_THEMES
+from textual.widgets import Markdown, Static
+
+from vibe.core.config import VibeConfig
+from vibe.setup.onboarding.base import OnboardingScreen
+
+THEMES = sorted(k for k in BUILTIN_THEMES if k != "textual-ansi")
+
+VISIBLE_NEIGHBORS = 3
+FADE_CLASSES = ["fade-1", "fade-2", "fade-3"]
+
+# Sample document rendered in the preview pane so every theme can be judged
+# against headings, code, lists, quotes and tables at once.
+PREVIEW_MARKDOWN = """
+### Heading
+
+**Bold**, *italic*, and `inline code`.
+
+- Bullet point
+- Another bullet point
+
+1. First item
+2. Second item
+
+```python
+def greet(name: str = "World") -> str:
+    return f"Hello, {name}!"
+```
+
+> Blockquote
+
+---
+
+| Column 1 | Column 2 |
+|----------|----------|
+| Item 1 | Item 2 |
+"""
+
+
+class ThemeSelectionScreen(OnboardingScreen):
+    """Onboarding screen for picking a built-in Textual theme.
+
+    Shows a wrap-around carousel of theme names (the selection plus
+    VISIBLE_NEIGHBORS faded rows on each side) next to a live markdown
+    preview; the chosen theme is applied immediately and saved on Enter.
+    """
+
+    BINDINGS: ClassVar[list[BindingType]] = [
+        Binding("enter", "next", "Next", show=False, priority=True),
+        Binding("up", "prev_theme", "Previous", show=False),
+        Binding("down", "next_theme", "Next Theme", show=False),
+        Binding("ctrl+c", "cancel", "Cancel", show=False),
+        Binding("escape", "cancel", "Cancel", show=False),
+    ]
+
+    NEXT_SCREEN = "api_key"
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._theme_index = 0
+        self._theme_widgets: list[Static] = []
+
+    def _compose_theme_list(self) -> ComposeResult:
+        """Create the fixed window of rows: selection + neighbors on each side."""
+        for _ in range(VISIBLE_NEIGHBORS * 2 + 1):
+            widget = Static("", classes="theme-item")
+            self._theme_widgets.append(widget)
+            yield widget
+
+    def compose(self) -> ComposeResult:
+        """Build the layout: title, hint/carousel row, and the preview pane."""
+        with Center(id="theme-outer"):
+            with Vertical(id="theme-content"):
+                yield Static("Select your preferred theme", id="theme-title")
+                yield Center(
+                    Horizontal(
+                        Static("Navigate ↑ ↓", id="nav-hint"),
+                        Vertical(*self._compose_theme_list(), id="theme-list"),
+                        Static("Press Enter ↵", id="enter-hint"),
+                        id="theme-row",
+                    )
+                )
+                with Container(id="preview-center"):
+                    preview = Container(id="preview")
+                    preview.border_title = "Preview"
+                    with preview:
+                        yield Container(Markdown(PREVIEW_MARKDOWN), id="preview-inner")
+
+    def on_mount(self) -> None:
+        # Start on the app's current theme when it is one of the selectable ones.
+        current_theme = self.app.theme
+        if current_theme in THEMES:
+            self._theme_index = THEMES.index(current_theme)
+        self._update_display()
+        self._update_preview_height()
+        self.focus()
+
+    def on_resize(self, _: Resize) -> None:
+        self._update_preview_height()
+
+    def _update_preview_height(self) -> None:
+        # Height is set dynamically because CSS alone cannot both fill the
+        # available space and page-scroll on overflow.
+        preview = self.query_one("#preview", Container)
+        header_height = 17  # title + margins + theme row + padding + buffer
+        available = self.app.size.height - header_height
+        preview.styles.max_height = max(10, available)
+
+    def _get_theme_at_offset(self, offset: int) -> str:
+        """Return the theme `offset` rows from the selection, wrapping around."""
+        index = (self._theme_index + offset) % len(THEMES)
+        return THEMES[index]
+
+    def _update_display(self) -> None:
+        """Refresh every carousel row: highlight the center, fade the neighbors."""
+        for i, widget in enumerate(self._theme_widgets):
+            offset = i - VISIBLE_NEIGHBORS
+            theme = self._get_theme_at_offset(offset)
+
+            widget.remove_class("selected", *FADE_CLASSES)
+
+            if offset == 0:
+                widget.update(f" {theme} ")
+                widget.add_class("selected")
+            else:
+                # Fade grows with distance, capped at the strongest fade class.
+                distance = min(abs(offset) - 1, len(FADE_CLASSES) - 1)
+                widget.update(theme)
+                widget.add_class(FADE_CLASSES[distance])
+
+    def _navigate(self, direction: int) -> None:
+        """Move the selection by `direction` and apply the theme immediately."""
+        self._theme_index = (self._theme_index + direction) % len(THEMES)
+        self.app.theme = THEMES[self._theme_index]
+        self._update_display()
+
+    def action_next_theme(self) -> None:
+        self._navigate(1)
+
+    def action_prev_theme(self) -> None:
+        self._navigate(-1)
+
+    def action_next(self) -> None:
+        """Persist the chosen theme (best effort) and advance to the next screen."""
+        theme = THEMES[self._theme_index]
+        try:
+            VibeConfig.save_updates({"textual_theme": theme})
+        except OSError:
+            # Best-effort persistence: a failed config write must not block onboarding.
+            pass
+        super().action_next()
diff --git a/vibe/setup/onboarding/screens/welcome.py b/vibe/setup/onboarding/screens/welcome.py
new file mode 100644
index 0000000..34aee96
--- /dev/null
+++ b/vibe/setup/onboarding/screens/welcome.py
@@ -0,0 +1,135 @@
+from __future__ import annotations
+
+from typing import ClassVar
+
+from textual.app import ComposeResult
+from textual.binding import Binding, BindingType
+from textual.containers import Center, Vertical
+from textual.timer import Timer
+from textual.widgets import Static
+
+from vibe.setup.onboarding.base import OnboardingScreen
+
+WELCOME_PREFIX = "Welcome to "
+WELCOME_HIGHLIGHT = "Mistral Vibe"
+WELCOME_SUFFIX = " - 
Let's get you started!"
+WELCOME_TEXT = WELCOME_PREFIX + WELCOME_HIGHLIGHT + WELCOME_SUFFIX
+
+# Character span of WELCOME_HIGHLIGHT inside WELCOME_TEXT.
+HIGHLIGHT_START = len(WELCOME_PREFIX)
+HIGHLIGHT_END = HIGHLIGHT_START + len(WELCOME_HIGHLIGHT)
+
+BUTTON_TEXT = "Press Enter ↵"
+
+# Orange ramp that rises then falls so the cycling gradient loops smoothly.
+GRADIENT_COLORS = [
+    "#ff6b00",
+    "#ff7b00",
+    "#ff8c00",
+    "#ff9d00",
+    "#ffae00",
+    "#ffbf00",
+    "#ffae00",
+    "#ff9d00",
+    "#ff8c00",
+    "#ff7b00",
+]
+
+
+def _apply_gradient(text: str, offset: int) -> str:
+    """Wrap each char of *text* in bold color markup, shifted by *offset*."""
+    result = []
+    for i, char in enumerate(text):
+        color = GRADIENT_COLORS[(i + offset) % len(GRADIENT_COLORS)]
+        result.append(f"[bold {color}]{char}[/]")
+    return "".join(result)
+
+
+class WelcomeScreen(OnboardingScreen):
+    """Animated welcome screen: types out the greeting, then shows the Enter hint.
+
+    Typing pauses briefly when the gradient-highlighted brand name completes,
+    starts a shimmer animation, then resumes; Enter only advances once the
+    whole line has been typed.
+    """
+
+    BINDINGS: ClassVar[list[BindingType]] = [
+        Binding("enter", "next", "Next", show=False, priority=True),
+        Binding("ctrl+c", "cancel", "Cancel", show=False),
+        Binding("escape", "cancel", "Cancel", show=False),
+    ]
+
+    NEXT_SCREEN = "theme_selection"
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._char_index = 0
+        self._gradient_offset = 0
+        self._typing_done = False
+        self._paused = False
+        self._typing_timer: Timer | None = None
+        self._button_char_index = 0
+        self._button_typing_timer: Timer | None = None
+        self._welcome_text: Static
+        self._enter_hint: Static
+
+    def compose(self) -> ComposeResult:
+        with Vertical(id="welcome-container"):
+            with Center():
+                yield Static("", id="welcome-text")
+            with Center():
+                yield Static("", id="enter-hint", classes="hidden")
+
+    def on_mount(self) -> None:
+        # Cache the two animated widgets and start the typewriter effect.
+        self._welcome_text = self.query_one("#welcome-text", Static)
+        self._enter_hint = self.query_one("#enter-hint", Static)
+        self._typing_timer = self.set_interval(0.04, self._type_next_char)
+        self.focus()
+
+    def _render_text(self, length: int) -> str:
+        """Return the first *length* chars of the text with the highlight colored."""
+        text = WELCOME_TEXT[:length]
+
+        if length <= HIGHLIGHT_START:
+            return text
+
+        prefix = text[:HIGHLIGHT_START]
+        highlight_len = min(length, HIGHLIGHT_END) - HIGHLIGHT_START
+        highlight = _apply_gradient(
+            WELCOME_HIGHLIGHT[:highlight_len], self._gradient_offset
+        )
+
+        if length > HIGHLIGHT_END:
+            suffix = text[HIGHLIGHT_END:]
+            return prefix + highlight + suffix
+        return prefix + highlight
+
+    def _type_next_char(self) -> None:
+        """Timer callback: reveal one more character, pausing at the highlight."""
+        if self._char_index >= len(WELCOME_TEXT):
+            if not self._typing_done:
+                self._typing_done = True
+                self.set_timer(0.5, self._show_button)
+            return
+
+        # Pause once when the brand name completes: stop typing, start the
+        # shimmer, and schedule the resume.
+        # NOTE(review): the gradient interval is never stored or stopped, so
+        # the shimmer runs for the screen's lifetime — looks intentional, confirm.
+        if self._char_index == HIGHLIGHT_END and not self._paused:
+            self._paused = True
+            if self._typing_timer:
+                self._typing_timer.stop()
+            self.set_interval(0.08, self._animate_gradient)
+            self.set_timer(1.4, self._resume_typing)
+            return
+
+        self._char_index += 1
+        self._welcome_text.update(self._render_text(self._char_index))
+
+    def _resume_typing(self) -> None:
+        # Resume slightly faster than the initial typing speed.
+        self._typing_timer = self.set_interval(0.03, self._type_next_char)
+
+    def _show_button(self) -> None:
+        # Reveal the hint widget, then type its text out character by character.
+        self._enter_hint.remove_class("hidden")
+        self._button_typing_timer = self.set_interval(0.03, self._type_button_char)
+
+    def _type_button_char(self) -> None:
+        """Timer callback: reveal one more character of the Enter hint."""
+        if self._button_char_index >= len(BUTTON_TEXT):
+            if self._button_typing_timer:
+                self._button_typing_timer.stop()
+            return
+        self._button_char_index += 1
+        self._enter_hint.update(BUTTON_TEXT[: self._button_char_index])
+
+    def _animate_gradient(self) -> None:
+        # Shift the gradient and re-render at the current typing position.
+        self._gradient_offset = (self._gradient_offset + 1) % len(GRADIENT_COLORS)
+        self._welcome_text.update(self._render_text(self._char_index))
+
+    def action_next(self) -> None:
+        # Ignore Enter until the headline has fully typed out.
+        if self._typing_done:
+            super().action_next()