mirror of
https://github.com/mistralai/mistral-vibe
synced 2026-04-25 17:14:55 +02:00
Co-authored-by: Quentin Torroba <quentin.torroba@mistral.ai> Co-authored-by: Clément Siriex <clement.sirieix@mistral.ai> Co-authored-by: Kim-Adeline Miguel <kimadeline.miguel@mistral.ai> Co-authored-by: Michel Thomazo <michel.thomazo@mistral.ai> Co-authored-by: Clément Drouin <clement.drouin@mistral.ai>
39 lines
996 B
Python
from __future__ import annotations
|
|
|
|
from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, Protocol
|
|
|
|
from vibe.core.types import AvailableTool, LLMChunk, LLMMessage, StrToolChoice
|
|
|
|
if TYPE_CHECKING:
|
|
from vibe.core.config import ProviderConfig
|
|
|
|
|
|
class PreparedRequest(NamedTuple):
    """Immutable bundle describing one fully-prepared HTTP request to an LLM provider.

    Produced by APIAdapter.prepare_request; the caller is expected to perform
    the actual network I/O using these pieces.
    """

    # Request endpoint (path portion of the URL).
    endpoint: str
    # HTTP headers to send with the request (e.g. auth, content type —
    # exact contents are adapter-specific).
    headers: dict[str, str]
    # Pre-serialized request payload, ready to send as-is.
    body: bytes
    # Base URL for the request; defaults to "" — presumably the caller then
    # falls back to the provider's configured base URL. TODO confirm against caller.
    base_url: str = ""
|
|
|
|
|
|
class APIAdapter(Protocol):
    """Structural (duck-typed) interface for provider-specific LLM API adapters.

    An implementation knows how to serialize a generic chat-completion call
    into one provider's wire format (prepare_request) and how to turn that
    provider's decoded JSON payload back into an LLMChunk (parse_response).
    """

    # Endpoint path for this adapter's API; a class-level constant on each
    # implementation rather than per-instance state.
    endpoint: ClassVar[str]

    # Build the outgoing request (endpoint, headers, serialized body, base URL)
    # for a single chat-completion call. All parameters are keyword-only.
    #   model_name:       provider-side model identifier
    #   messages:         conversation messages to send
    #   temperature:      sampling temperature
    #   tools:            tools the model may call, or None for none
    #   max_tokens:       completion-length cap, or None for the provider default
    #   tool_choice:      tool-selection strategy, or a specific tool to force
    #   enable_streaming: request a streaming response when True
    #   provider:         provider configuration (exact use is implementation-defined)
    #   api_key:          credential for the provider; None presumably means the
    #                     implementation resolves it elsewhere — verify
    #   thinking:         reasoning/"thinking" mode selector; defaults to "off"
    def prepare_request(  # noqa: PLR0913
        self,
        *,
        model_name: str,
        messages: list[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        enable_streaming: bool,
        provider: ProviderConfig,
        api_key: str | None = None,
        thinking: str = "off",
    ) -> PreparedRequest: ...

    # Convert one decoded JSON payload from the provider into an LLMChunk.
    # `data` is the already-parsed response body; `provider` supplies the
    # provider configuration for any provider-specific interpretation.
    def parse_response(
        self, data: dict[str, Any], provider: ProviderConfig
    ) -> LLMChunk: ...
|