Files
mistral-vibe/vibe/core/llm/backend/base.py
Mathias Gesbert ec7f3b25ea v2.2.0 (#395)
Co-authored-by: Quentin Torroba <quentin.torroba@mistral.ai>
Co-authored-by: Clément Sirieix <clement.sirieix@mistral.ai>
Co-authored-by: Kim-Adeline Miguel <kimadeline.miguel@mistral.ai>
Co-authored-by: Michel Thomazo <michel.thomazo@mistral.ai>
Co-authored-by: Clément Drouin <clement.drouin@mistral.ai>
2026-02-17 16:23:28 +01:00

39 lines
996 B
Python

from __future__ import annotations
from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, Protocol
from vibe.core.types import AvailableTool, LLMChunk, LLMMessage, StrToolChoice
if TYPE_CHECKING:
from vibe.core.config import ProviderConfig
class PreparedRequest(NamedTuple):
    """A fully assembled HTTP request produced by an APIAdapter.

    Immutable bundle of everything needed to issue the call: the endpoint,
    the headers (auth included, presumably set from ``api_key`` in
    ``prepare_request`` — confirm in concrete adapters), and the already
    serialized request body.
    """

    # Endpoint to send the request to — presumably a path joined onto
    # base_url when base_url is non-empty; verify against the HTTP client.
    endpoint: str
    # HTTP headers to send with the request.
    headers: dict[str, str]
    # Serialized request payload (already encoded to bytes by the adapter).
    body: bytes
    # Optional base URL override; empty string means "use the default"
    # (NOTE(review): exact fallback semantics live in the caller — confirm).
    base_url: str = ""
class APIAdapter(Protocol):
    """Structural interface for provider-specific LLM API adapters.

    An adapter translates between the core's provider-agnostic types
    (``LLMMessage``, ``AvailableTool``, ``LLMChunk``) and one concrete
    HTTP API: it builds the outgoing request and parses the raw response.
    Being a ``Protocol``, implementations conform by shape — no inheritance
    required.
    """

    # Default endpoint for this API flavor, shared by all instances.
    endpoint: ClassVar[str]

    def prepare_request(  # noqa: PLR0913
        self,
        *,
        model_name: str,
        messages: list[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        enable_streaming: bool,
        provider: ProviderConfig,
        api_key: str | None = None,
        thinking: str = "off",
    ) -> PreparedRequest:
        """Build a ready-to-send request for one chat/completion call.

        All arguments are keyword-only. ``tools``, ``max_tokens``,
        ``tool_choice`` and ``api_key`` are optional; ``thinking`` defaults
        to ``"off"`` (other accepted values are adapter-specific — not
        visible from this interface).
        """
        ...

    def parse_response(
        self, data: dict[str, Any], provider: ProviderConfig
    ) -> LLMChunk:
        """Convert one decoded response payload into an ``LLMChunk``.

        ``data`` is a parsed JSON object from the provider — presumably one
        streaming event when streaming is enabled, or the full response
        body otherwise; confirm against the transport layer.
        """
        ...