Files
mistral-vibe/vibe/core/llm/backend/base.py
Clément Drouin e1a25caa52 v2.7.5 (#589)
Co-authored-by: Bastien <bastien.baret@gmail.com>
Co-authored-by: Clément Sirieix <clement.sirieix@mistral.ai>
Co-authored-by: Julien Legrand <72564015+JulienLGRD@users.noreply.github.com>
Co-authored-by: Kim-Adeline Miguel <51720070+kimadeline@users.noreply.github.com>
Co-authored-by: Mathias Gesbert <mathias.gesbert@mistral.ai>
Co-authored-by: Pierre Rossinès <pierre.rossines@mistral.ai>
Co-authored-by: Quentin <quentin.torroba@mistral.ai>
Co-authored-by: Vincent G <10739306+VinceOPS@users.noreply.github.com>
Co-authored-by: Mistral Vibe <vibe@mistral.ai>
2026-04-14 10:33:15 +02:00

40 lines
1020 B
Python

from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, Protocol
from vibe.core.types import AvailableTool, LLMChunk, LLMMessage, StrToolChoice
if TYPE_CHECKING:
from vibe.core.config import ProviderConfig
class PreparedRequest(NamedTuple):
    """Immutable container for a fully assembled LLM-provider HTTP request.

    Produced by `APIAdapter.prepare_request`; holds everything a transport
    layer needs to actually send the call.
    """

    # Request target; whether this is a path or a full URL depends on the
    # adapter — presumably combined with `base_url` when that is non-empty.
    endpoint: str
    # HTTP headers to send (e.g. auth/content-type — set by the adapter).
    headers: dict[str, str]
    # Serialized request payload, already encoded to bytes.
    body: bytes
    # Optional base URL override; "" means the caller's/provider's default.
    base_url: str = ""
class APIAdapter(Protocol):
    """Structural (duck-typed) interface for provider-specific API adapters.

    An implementation translates between the generic chat-completion
    parameters used internally and one provider's wire format: it builds
    the outgoing request (`prepare_request`) and parses the decoded
    response payload back into an `LLMChunk` (`parse_response`).
    """

    # Class-level default endpoint for this adapter type (per-request
    # overrides come back in `PreparedRequest.endpoint`).
    endpoint: ClassVar[str]

    def prepare_request(
        self,
        *,  # all parameters below are keyword-only
        model_name: str,
        messages: Sequence[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        enable_streaming: bool,
        provider: ProviderConfig,
        api_key: str | None = None,
        thinking: str = "off",
    ) -> PreparedRequest:
        """Assemble a provider-specific `PreparedRequest` for a chat call.

        `tools`, `max_tokens`, and `tool_choice` may be None when unused;
        `api_key` may be None (auth then presumably comes from `provider` —
        confirm against concrete adapters). `thinking` defaults to "off";
        the set of accepted values is implementation-defined here.
        """
        ...

    def parse_response(
        self, data: dict[str, Any], provider: ProviderConfig
    ) -> LLMChunk:
        """Convert one decoded (JSON-like) provider response dict into an LLMChunk."""
        ...