diff --git a/browser_use/agent/views.py b/browser_use/agent/views.py
index 6b717d5c1..485547cce 100644
--- a/browser_use/agent/views.py
+++ b/browser_use/agent/views.py
@@ -193,24 +193,25 @@ class AgentOutput(BaseModel):
 
 	def type_with_custom_actions_no_thinking(custom_actions: type[ActionModel]) -> type[AgentOutput]:
 		"""Extend actions with custom actions and exclude thinking field"""
 
-		# Create a base model without thinking, but inheriting from AgentOutput
-		# Override only the fields we need to change
-		model_ = create_model(
+		class AgentOutputNoThinking(AgentOutput):
+			@classmethod
+			def model_json_schema(cls, **kwargs):
+				schema = super().model_json_schema(**kwargs)
+				del schema['properties']['thinking']
+				return schema
+
+		model = create_model(
 			'AgentOutput',
-			__base__=AgentOutput,
-			thinking=(
-				type(None),  # type: ignore
-				Field(default=None, exclude=True),
-			),  # Exclude thinking from schema
+			__base__=AgentOutputNoThinking,
 			action=(
 				list[custom_actions],  # type: ignore
 				Field(..., description='List of actions to execute', json_schema_extra={'min_items': 1}),
 			),
-			__module__=AgentOutput.__module__,
+			__module__=AgentOutputNoThinking.__module__,
 		)
-		model_.__doc__ = 'AgentOutput model with custom actions'
-		return model_
+		model.__doc__ = 'AgentOutput model with custom actions'
+		return model
 
 
 class AgentHistory(BaseModel):
diff --git a/browser_use/llm/openai/chat.py b/browser_use/llm/openai/chat.py
index 2b49ea20f..e91b662c7 100644
--- a/browser_use/llm/openai/chat.py
+++ b/browser_use/llm/openai/chat.py
@@ -96,8 +96,15 @@ class ChatOpenAI(BaseChatModel):
 		return str(self.model)
 
 	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
-		usage = (
-			ChatInvokeUsage(
+		if response.usage is not None:
+			completion_tokens = response.usage.completion_tokens
+			completion_token_details = response.usage.completion_tokens_details
+			if completion_token_details is not None:
+				reasoning_tokens = completion_token_details.reasoning_tokens
+				if reasoning_tokens is not None:
+					completion_tokens += reasoning_tokens
+
+			usage = ChatInvokeUsage(
 				prompt_tokens=response.usage.prompt_tokens,
 				prompt_cached_tokens=response.usage.prompt_tokens_details.cached_tokens
 				if response.usage.prompt_tokens_details is not None
@@ -105,12 +112,12 @@
 				prompt_cache_creation_tokens=None,
 				prompt_image_tokens=None,
 				# Completion
-				completion_tokens=response.usage.completion_tokens,
+				completion_tokens=completion_tokens,
 				total_tokens=response.usage.total_tokens,
 			)
-			if response.usage is not None
-			else None
-		)
+		else:
+			usage = None
+
 		return usage
 
 	@overload
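
Note on the views.py change: instead of overriding the `thinking` field on the generated model, the exclusion now happens at schema-generation time via a `model_json_schema` override on a small subclass, so `thinking` disappears from the JSON schema sent to the LLM while remaining a real attribute on the model. A minimal standalone sketch of that pattern, assuming plain pydantic v2 (the `Output`/`OutputNoThinking` names are illustrative, not browser_use code):

from pydantic import BaseModel


class Output(BaseModel):
    thinking: str | None = None
    answer: str


class OutputNoThinking(Output):
    @classmethod
    def model_json_schema(cls, **kwargs):
        schema = super().model_json_schema(**kwargs)
        # Drop the field from the emitted JSON schema only; the attribute
        # still exists on the model and still validates if present.
        del schema['properties']['thinking']
        return schema


assert 'thinking' not in OutputNoThinking.model_json_schema()['properties']
assert OutputNoThinking(answer='42', thinking='scratch').thinking == 'scratch'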
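
Note on the chat.py change: `_get_usage` now folds `completion_tokens_details.reasoning_tokens` into the reported `completion_tokens` whenever the API returns them, and the conditional-expression construction of `usage` becomes an explicit `if`/`else`. A toy walkthrough of what the new branch computes, with made-up token counts and dataclass stand-ins for the openai usage objects:

from dataclasses import dataclass


@dataclass
class FakeDetails:
    reasoning_tokens: int | None


@dataclass
class FakeUsage:
    completion_tokens: int
    completion_tokens_details: FakeDetails | None


usage = FakeUsage(completion_tokens=150, completion_tokens_details=FakeDetails(reasoning_tokens=832))

# Same accounting as the rewritten _get_usage branch:
completion_tokens = usage.completion_tokens
if usage.completion_tokens_details is not None:
    reasoning_tokens = usage.completion_tokens_details.reasoning_tokens
    if reasoning_tokens is not None:
        completion_tokens += reasoning_tokens

print(completion_tokens)  # 982: reasoning tokens counted on top of the visible output tokens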