Mirror of https://github.com/browser-use/browser-use, synced 2026-05-06 17:52:15 +02:00.
Add runtime type check for llm in Agent init
Added a type check in Agent's constructor to ensure the llm argument is an instance of BaseChatModel, raising a ValueError otherwise. BaseChatModel is now marked @runtime_checkable so that isinstance checks against the Protocol are supported.
This commit is contained in:
@@ -185,6 +185,8 @@ class Agent(Generic[Context, AgentStructuredOutput]):
|
||||
include_tool_call_examples: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
if not isinstance(llm, BaseChatModel):
|
||||
raise ValueError('invalid llm, must be from browser_use.llm')
|
||||
# Check for deprecated planner parameters
|
||||
planner_params = [planner_llm, use_vision_for_planner, is_planner_reasoning, extend_planner_system_message]
|
||||
if any(param is not None and param is not False for param in planner_params) or planner_interval != 1:
|
||||
|
||||
@@ -4,7 +4,7 @@ We have switched all of our code from langchain to openai.types.chat.chat_comple
|
||||
For easier transition we have
|
||||
"""
|
||||
|
||||
from typing import Any, Protocol, TypeVar, overload
|
||||
from typing import Any, Protocol, TypeVar, overload, runtime_checkable
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
@@ -14,6 +14,7 @@ from browser_use.llm.views import ChatInvokeCompletion
|
||||
T = TypeVar('T', bound=BaseModel)
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class BaseChatModel(Protocol):
|
||||
_verified_api_keys: bool = False
|
||||
|
||||
|
||||
Reference in New Issue
Block a user