Add runtime type check for llm in Agent init

Added a type check to ensure the llm argument is an instance of BaseChatModel in Agent's constructor, raising a ValueError otherwise. Also marked BaseChatModel as @runtime_checkable to support isinstance checks.
This commit is contained in:
மனோஜ்குமார் பழனிச்சாமி
2025-07-19 13:53:39 +05:30
parent e75be4f6bb
commit 95009f7d95
2 changed files with 4 additions and 1 deletion

View File

@@ -185,6 +185,8 @@ class Agent(Generic[Context, AgentStructuredOutput]):
include_tool_call_examples: bool = False,
**kwargs,
):
if not isinstance(llm, BaseChatModel):
raise ValueError('invalid llm, must be from browser_use.llm')
# Check for deprecated planner parameters
planner_params = [planner_llm, use_vision_for_planner, is_planner_reasoning, extend_planner_system_message]
if any(param is not None and param is not False for param in planner_params) or planner_interval != 1:

View File

@@ -4,7 +4,7 @@ We have switched all of our code from langchain to openai.types.chat.chat_comple
For easier transition we have
"""
from typing import Any, Protocol, TypeVar, overload
from typing import Any, Protocol, TypeVar, overload, runtime_checkable
from pydantic import BaseModel
@@ -14,6 +14,7 @@ from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@runtime_checkable
class BaseChatModel(Protocol):
_verified_api_keys: bool = False