sure/app/models/assistant/responder.rb
Juan José Mata 7b2b1dd367 Rebase PR #784 and fix OpenAI model/chat regressions (#1384)
* Wire conversation history through OpenAI responses API

* Fix RuboCop hash brace spacing in assistant tests

* Pipelock ignores

* Batch fixes

---------

Co-authored-by: sokiee <sokysrm@gmail.com>
2026-04-15 18:45:24 +02:00


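# Assistant::Responder orchestrates a single turn of an AI chat: it sends the
# user's message to the LLM, streams text back through registered listeners,
# and fulfills any function (tool) requests the model makes before asking for
# a single follow-up response.
#
# A minimal usage sketch (assistant_message, tool_caller, and provider are
# hypothetical caller-side objects, not defined in this file):
#
#   responder = Assistant::Responder.new(
#     message: assistant_message,
#     instructions: "You are a helpful assistant.",
#     function_tool_caller: tool_caller,
#     llm: provider
#   )
#   responder.on(:output_text) { |text| print(text) }
#   responder.on(:response)    { |payload| puts "done: #{payload[:id]}" }
#   responder.respond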
class Assistant::Responder
  def initialize(message:, instructions:, function_tool_caller:, llm:)
    @message = message
    @instructions = instructions
    @function_tool_caller = function_tool_caller
    @llm = llm
  end

  def on(event_name, &block)
    listeners[event_name.to_sym] << block
  end
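
  # Events emitted while responding (see the emit calls below):
  #   :output_text - a streamed chunk of assistant text (String)
  #   :response    - a completed response; the payload is { id: }, plus
  #                  { function_tool_calls: } when tools were executed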
  def respond(previous_response_id: nil)
    # Tracks whether the final response has already been handled by the streamer
    response_handled = false

    # Streamer for the first response
    streamer = proc do |chunk|
      case chunk.type
      when "output_text"
        emit(:output_text, chunk.data)
      when "response"
        response = chunk.data
        response_handled = true

        if response.function_requests.any?
          handle_follow_up_response(response)
        else
          emit(:response, { id: response.id })
        end
      end
    end

    response = get_llm_response(streamer: streamer, previous_response_id: previous_response_id)

    # For synchronous (non-streaming) responses, handle function requests
    # here if the streamer has not already done so
    unless response_handled
      if response && response.function_requests.any?
        handle_follow_up_response(response)
      elsif response
        emit(:response, { id: response.id })
      end
    end
  end

  private
    attr_reader :message, :instructions, :function_tool_caller, :llm
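
    # Fulfills the model's function (tool) requests exactly once, emits a
    # :response event carrying the results, then makes a single follow-up
    # LLM call. The follow-up streamer deliberately ignores any further
    # function requests, so tool execution cannot recurse.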
    def handle_follow_up_response(response)
      streamer = proc do |chunk|
        case chunk.type
        when "output_text"
          emit(:output_text, chunk.data)
        when "response"
          # We do not currently support function executions for a follow-up
          # response (avoid recursive LLM calls that could lead to high spend)
          emit(:response, { id: chunk.data.id })
        end
      end

      function_tool_calls = function_tool_caller.fulfill_requests(response.function_requests)

      emit(:response, {
        id: response.id,
        function_tool_calls: function_tool_calls
      })

      # Get the follow-up response with the tool call results
      get_llm_response(
        streamer: streamer,
        function_results: function_tool_calls.map(&:to_result),
        previous_response_id: response.id
      )
    end
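
    # Single entry point for LLM calls. function_results feeds tool outputs
    # back to the model, and previous_response_id chains this call onto the
    # provider's prior response (OpenAI Responses API style, per the commit
    # above). Raises if the provider reports a failure.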
    def get_llm_response(streamer:, function_results: [], previous_response_id: nil)
      response = llm.chat_response(
        message.content,
        model: message.ai_model,
        instructions: instructions,
        functions: function_tool_caller.function_definitions,
        function_results: function_results,
        messages: conversation_history,
        streamer: streamer,
        previous_response_id: previous_response_id,
        session_id: chat_session_id,
        user_identifier: chat_user_identifier,
        family: message.chat&.user&.family
      )

      raise response.error unless response.success?

      response.data
    end

    def emit(event_name, payload = nil)
      listeners[event_name.to_sym].each { |block| block.call(payload) }
    end

    def listeners
      # Default block auto-initializes an empty handler array per event name
      @listeners ||= Hash.new { |h, k| h[k] = [] }
    end
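
    # Stable identifiers passed through to the provider (session_id /
    # user_identifier above). The user id is SHA256-hashed so the raw id
    # is never sent to the provider.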
    def chat_session_id
      chat&.id&.to_s
    end

    def chat_user_identifier
      return unless chat&.user_id

      ::Digest::SHA256.hexdigest(chat.user_id.to_s)
    end

    def chat
      @chat ||= message.chat
    end
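
    # Rebuilds the prior turns as a provider-ready message array. Illustrative
    # shape of the result (values are examples only):
    #
    #   [
    #     { role: "user", content: "How much did I spend last month?" },
    #     { role: "assistant", content: "", tool_calls: [...] },
    #     { role: "tool", tool_call_id: "call_abc", name: "get_spending", content: "{\"total\":42}" }
    #   ]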
    def conversation_history
      messages = []
      return messages unless chat&.messages

      chat.messages
        .where(type: [ "UserMessage", "AssistantMessage" ], status: "complete")
        .includes(:tool_calls)
        .ordered
        .each do |chat_message|
          if chat_message.tool_calls.any?
            messages << {
              role: chat_message.role,
              content: chat_message.content || "",
              tool_calls: chat_message.tool_calls.map(&:to_tool_call)
            }

            chat_message.tool_calls.map(&:to_result).each do |fn_result|
              # Handle nil explicitly to avoid serializing to "null"
              output = fn_result[:output]
              content = if output.nil?
                ""
              elsif output.is_a?(String)
                output
              else
                output.to_json
              end

              messages << {
                role: "tool",
                tool_call_id: fn_result[:call_id],
                name: fn_result[:name],
                content: content
              }
            end
          elsif chat_message.content.present?
            messages << { role: chat_message.role, content: chat_message.content }
          end
        end

      messages
    end
end