Merge branch 'main' into openrouter_support

Will Bonde
2025-07-03 14:09:28 -04:00
committed by GitHub
2 changed files with 25 additions and 17 deletions

View File

@@ -193,24 +193,25 @@ class AgentOutput(BaseModel):
 	def type_with_custom_actions_no_thinking(custom_actions: type[ActionModel]) -> type[AgentOutput]:
 		"""Extend actions with custom actions and exclude thinking field"""
-		# Create a base model without thinking, but inheriting from AgentOutput
-		# Override only the fields we need to change
-		model_ = create_model(
+		class AgentOutputNoThinking(AgentOutput):
+			@classmethod
+			def model_json_schema(cls, **kwargs):
+				schema = super().model_json_schema(**kwargs)
+				del schema['properties']['thinking']
+				return schema
+
+		model = create_model(
 			'AgentOutput',
-			__base__=AgentOutput,
-			thinking=(
-				type(None),  # type: ignore
-				Field(default=None, exclude=True),
-			),  # Exclude thinking from schema
+			__base__=AgentOutputNoThinking,
 			action=(
 				list[custom_actions],  # type: ignore
 				Field(..., description='List of actions to execute', json_schema_extra={'min_items': 1}),
 			),
-			__module__=AgentOutput.__module__,
+			__module__=AgentOutputNoThinking.__module__,
 		)
-		model_.__doc__ = 'AgentOutput model with custom actions'
-		return model_
+		model.__doc__ = 'AgentOutput model with custom actions'
+		return model

 class AgentHistory(BaseModel):
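
The new approach drops the 'thinking' key from the generated JSON schema instead of redefining the field on the model, so the Python attribute is untouched and only the schema handed to the provider changes. Below is a minimal, self-contained sketch of that pattern; the Base, BaseNoThinking, and Extended names are illustrative stand-ins, not code from this diff.

from pydantic import BaseModel, Field, create_model


class Base(BaseModel):
	thinking: str | None = None
	answer: str = ''


class BaseNoThinking(Base):
	@classmethod
	def model_json_schema(cls, **kwargs):
		# Emit the parent's schema, then drop the property we want hidden.
		schema = super().model_json_schema(**kwargs)
		del schema['properties']['thinking']
		return schema


# Subclasses created via create_model inherit the override.
Extended = create_model(
	'Extended',
	__base__=BaseNoThinking,
	steps=(list[str], Field(..., description='List of steps')),
)

print('thinking' in Extended.model_json_schema()['properties'])  # False
print('steps' in Extended.model_json_schema()['properties'])  # True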

View File

@@ -96,8 +96,15 @@ class ChatOpenAI(BaseChatModel):
 		return str(self.model)

 	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
-		usage = (
-			ChatInvokeUsage(
+		if response.usage is not None:
+			completion_tokens = response.usage.completion_tokens
+			completion_token_details = response.usage.completion_tokens_details
+			if completion_token_details is not None:
+				reasoning_tokens = completion_token_details.reasoning_tokens
+				if reasoning_tokens is not None:
+					completion_tokens += reasoning_tokens
+
+			usage = ChatInvokeUsage(
 				prompt_tokens=response.usage.prompt_tokens,
 				prompt_cached_tokens=response.usage.prompt_tokens_details.cached_tokens
 				if response.usage.prompt_tokens_details is not None
@@ -105,12 +112,12 @@ class ChatOpenAI(BaseChatModel):
 				prompt_cache_creation_tokens=None,
 				prompt_image_tokens=None,
 				# Completion
-				completion_tokens=response.usage.completion_tokens,
+				completion_tokens=completion_tokens,
 				total_tokens=response.usage.total_tokens,
 			)
-			if response.usage is not None
-			else None
-		)
+		else:
+			usage = None
 		return usage

 	@overload
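
The restructured _get_usage folds reasoning tokens, when the provider reports them under usage.completion_tokens_details, into the completion count, and returns None if the response carries no usage block at all. Here is a standalone sketch of just that accounting logic, using hypothetical dataclass stand-ins (Usage, CompletionTokensDetails) rather than the real OpenAI client types.

from dataclasses import dataclass


@dataclass
class CompletionTokensDetails:
	reasoning_tokens: int | None = None


@dataclass
class Usage:
	prompt_tokens: int
	completion_tokens: int
	total_tokens: int
	completion_tokens_details: CompletionTokensDetails | None = None


def completion_tokens_with_reasoning(usage: Usage | None) -> int | None:
	"""Mirror the diff's accounting: add separately reported reasoning tokens."""
	if usage is None:
		return None  # no usage block on the response
	completion_tokens = usage.completion_tokens
	details = usage.completion_tokens_details
	if details is not None and details.reasoning_tokens is not None:
		completion_tokens += details.reasoning_tokens
	return completion_tokens


assert completion_tokens_with_reasoning(None) is None
assert completion_tokens_with_reasoning(Usage(10, 20, 30)) == 20
assert completion_tokens_with_reasoning(Usage(10, 20, 45, CompletionTokensDetails(15))) == 35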