Skip to content
Snippets Groups Projects
Unverified Commit d29b3ae2 authored by Mark Chao's avatar Mark Chao Committed by GitLab
Browse files

Merge branch 'rename-single-action-executor' into 'master'

Rename SingleActionExecutor to ReActExecutor

See merge request https://gitlab.com/gitlab-org/gitlab/-/merge_requests/168143



Merged-by: Mark Chao <mchao@gitlab.com>
Approved-by: Mark Chao <mchao@gitlab.com>
Reviewed-by: Mark Chao <mchao@gitlab.com>
Co-authored-by: Shinya Maeda <shinya@gitlab.com>
parents beeafe13 5c3b64ef
No related branches found
No related tags found
No related merge requests found
# frozen_string_literal: true
module Gitlab
module Duo
module Chat
# Executes a Duo Chat question using the ReAct (Reason + Act) pattern:
# it repeatedly asks the AI Gateway agent to take a step, runs whichever
# tool the agent selects, feeds the tool's observation back into the
# agent, and stops once the agent emits a final answer (or an error is
# converted into an error Answer).
class ReactExecutor
include Gitlab::Utils::StrongMemoize
include Langsmith::RunHelpers
include ::Gitlab::Llm::Concerns::Logger
# Raised when the agent requests a tool that GitLab Rails does not know about.
ToolNotFoundError = Class.new(StandardError)
# Raised when a step produces no events at all.
EmptyEventsError = Class.new(StandardError)
# Raised when MAX_ITERATIONS steps elapse without a final answer.
ExhaustedLoopError = Class.new(StandardError)
# Raised when the agent event stream yields an Error event.
AgentEventError = Class.new(StandardError)
attr_reader :tools, :user_input, :context, :response_handler
attr_accessor :iterations
# Upper bound on reason/act cycles before giving up with ExhaustedLoopError.
MAX_ITERATIONS = 10
# @param [String] user_input - a question from a user
# @param [Array<Tool>] tools - an array of Tools defined in the tools module.
# @param [GitlabContext] context - Gitlab context containing useful context information
# @param [ResponseService] response_handler - Handles returning the response to the client
# @param [ResponseService] stream_response_handler - Handles streaming chunks to the client
def initialize(user_input:, tools:, context:, response_handler:, stream_response_handler: nil)
@user_input = user_input
@tools = tools
@context = context
@iterations = 0
@response_handler = response_handler
@stream_response_handler = stream_response_handler
end
# Runs the ReAct loop and returns a Gitlab::Llm::Chain::Answer.
# Any StandardError raised along the way is tracked and mapped to an
# error Answer instead of being propagated to the caller.
def execute
MAX_ITERATIONS.times do
events = step_forward
raise EmptyEventsError if events.empty?
# Exactly one processor should yield an answer; a tool action returns
# nil so the loop continues with the tool's observation recorded.
answer = process_final_answer(events) ||
process_tool_action(events) ||
process_unknown(events)
return answer if answer
end
raise ExhaustedLoopError
rescue StandardError => error
Gitlab::ErrorTracking.track_exception(error)
error_answer(error)
end
traceable :execute, name: 'Run ReAct'
private
# TODO: Improve these error messages. See https://gitlab.com/gitlab-org/gitlab/-/issues/479465
# TODO Handle ForbiddenError, ClientError, ServerError.
# Maps a raised error to an error Answer carrying a stable, client-facing
# error code (A1000..A1006, M3006, with A9999 as the catch-all).
def error_answer(error)
case error
when Net::ReadTimeout
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, I couldn't respond in time. Please try again."),
source: "chat_v2",
error_code: "A1000"
)
when Gitlab::Llm::AiGateway::Client::ConnectionError
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A1001"
)
when EmptyEventsError
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, I couldn't respond in time. Please try again."),
source: "chat_v2",
error_code: "A1002"
)
when EOFError
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A1003"
)
when AgentEventError
# "prompt is too long" is matched on the upstream error text; other
# agent errors fall through to the generic A1004 code.
if error.message.present? && error.message.include?("prompt is too long")
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, you've entered too many prompts. Please run /clear " \
"or /reset before asking the next question."),
source: "chat_v2",
error_code: "A1005"
)
else
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A1004"
)
end
when ExhaustedLoopError
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, Duo Chat agent reached the limit before finding an answer for your question. " \
"Please try a different prompt or clear your conversation history with /clear."),
source: "chat_v2",
error_code: "A1006"
)
when Gitlab::AiGateway::ForbiddenError
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, you don't have the GitLab Duo subscription required " \
"to use Duo Chat. Please contact your administrator."),
source: "chat_v2",
error_code: "M3006"
)
else
Gitlab::Llm::Chain::Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A9999"
)
end
end
# Joins all FinalAnswerDelta event texts into one final Answer, or
# returns nil when the step produced no final-answer deltas.
def process_final_answer(events)
events = events.select { |e| e.instance_of? Gitlab::Duo::Chat::AgentEvents::FinalAnswerDelta }
return if events.empty?
content = events.map(&:text).join("")
Gitlab::Llm::Chain::Answer.final_answer(context: context, content: content)
end
# Executes the tool named by the first Action event. Returns the tool's
# answer when it is final; otherwise records the observation on the step
# executor and returns nil so the ReAct loop continues.
def process_tool_action(events)
event = events.find { |e| e.instance_of? Gitlab::Duo::Chat::AgentEvents::Action }
return unless event
tool_class = get_tool_class(event.tool)
tool = tool_class.new(
context: context,
options: {
input: user_input,
suggestions: event.thought
},
stream_response_handler: stream_response_handler
)
tool_answer = tool.execute
return tool_answer if tool_answer.is_final?
step_executor.update_observation(tool_answer.content.strip)
nil
end
# Surfaces an Unknown event's text directly as the final answer (logged
# as a warning), or returns nil when no Unknown event is present.
def process_unknown(events)
event = events.find { |e| e.instance_of? Gitlab::Duo::Chat::AgentEvents::Unknown }
return unless event
log_warn(message: "Surface an unknown event as a final answer to the user",
event_name: 'unknown_event',
ai_component: 'duo_chat')
Gitlab::Llm::Chain::Answer.final_answer(context: context, content: event.text)
end
# Memoized per-execution agent stepper; also accumulates observations.
def step_executor
@step_executor ||= Gitlab::Duo::Chat::StepExecutor.new(context.current_user)
end
# Performs one agent step and returns its events. While the step runs,
# FinalAnswerDelta events are streamed to the client in de-duplicated
# chunks via stream_response_handler (when one is configured); an Error
# event aborts the step by raising AgentEventError.
def step_forward
streamed_answer = Gitlab::Llm::Chain::StreamedAnswer.new
step_executor.step(step_params) do |event|
raise AgentEventError, event.message if event.instance_of? Gitlab::Duo::Chat::AgentEvents::Error
next unless stream_response_handler
next unless event.instance_of? Gitlab::Duo::Chat::AgentEvents::FinalAnswerDelta
chunk = streamed_answer.next_chunk(event.text)
next unless chunk
stream_response_handler.execute(
response: Gitlab::Llm::Chain::StreamedResponseModifier
.new(chunk[:content], chunk_id: chunk[:id]),
options: { chunk_id: chunk[:id] }
)
end
end
# Request payload for one agent step.
def step_params
{
messages: messages,
model_metadata: model_metadata_params,
unavailable_resources: unavailable_resources_params
}
end
# Truncated conversation history plus the current user turn (with page
# context, current-file selection, and any additional context attached).
def messages
conversation.append(
{
role: "user",
content: user_input,
context: current_resource_params,
current_file: current_file_params,
additional_context: context.additional_context
}
)
end
# Resolves the agent-supplied tool name (e.g. "issue_reader") to the
# Executor class of the matching entry in the allow-listed +tools+.
def get_tool_class(tool)
tool_name = tool.camelize
tool_class = tools.find { |tool_class| tool_class::Executor::NAME == tool_name }
unless tool_class
# Make sure that the v2/chat/agent endpoint in AI Gateway and the GitLab-Rails are compatible.
log_error(message: "Failed to find a tool in GitLab Rails",
event_name: 'tool_not_find',
ai_component: 'duo_chat',
tool_name: tool)
# NOTE(review): the second argument to raise is a Hash, so the
# exception message is the Hash's #to_s (e.g. '{:tool=>"Foo"}') —
# confirm this is intended rather than a plain string message.
raise ToolNotFoundError, tool: tool_name
end
tool_class::Executor
end
# Resource types the agent must not claim it can access.
def unavailable_resources_params
%w[Pipelines Vulnerabilities]
end
attr_reader :stream_response_handler
# Model routing info, only populated when Duo Chat runs against a
# self-hosted model; nil otherwise.
def model_metadata_params
return unless chat_feature_setting&.self_hosted?
self_hosted_model = chat_feature_setting.self_hosted_model
{
provider: :openai, # for self-hosted models we support Messages API format at the moment
name: self_hosted_model.model,
endpoint: self_hosted_model.endpoint,
api_key: self_hosted_model.api_token,
identifier: self_hosted_model.identifier
}
end
# Prior chat turns for this user, truncated to fit the context window.
def conversation
Gitlab::Llm::Chain::Utils::ChatConversation.new(context.current_user)
.truncated_conversation_list
end
# Type/content of the page the user is viewing, or nil when unknown.
def current_resource_params
return unless current_resource_type
{
type: current_resource_type,
content: current_resource_content
}
end
def current_resource_type
context.current_page_type
rescue ArgumentError
# ArgumentError signals an unsupported page type; treat as "no resource".
nil
end
strong_memoize_attr :current_resource_type
def current_resource_content
context.current_page_short_description
rescue ArgumentError
nil
end
strong_memoize_attr :current_resource_content
# Current file payload: prefers the user's text selection over the blob
# being viewed; nil when neither is available.
def current_file_params
return unless current_selection || current_blob
if current_selection
file_path = current_selection[:file_name]
data = current_selection[:selected_text]
else
file_path = current_blob.path
data = current_blob.data
end
{
file_path: file_path,
data: data,
selected_code: !!current_selection
}
end
# The current_file context hash, but only when it has selected text.
def current_selection
return unless context.current_file[:selected_text].present?
context.current_file
end
strong_memoize_attr :current_selection
def current_blob
context.extra_resource[:blob]
end
strong_memoize_attr :current_blob
def chat_feature_setting
::Ai::FeatureSetting.find_by_feature(:duo_chat)
end
end
end
end
end
# frozen_string_literal: true
module Gitlab
module Llm
module Chain
module Agents
# TODO: Rename to Gitlab::Duo::Chat::MultiStepExecutor
# NOTE(review): near-identical copy of Gitlab::Duo::Chat::ReactExecutor,
# kept under the old name/namespace during the rename — confirm no
# callers remain before deleting. Differences are limited to the class
# references resolved relative to this namespace (e.g. bare Answer).
class SingleActionExecutor
include Gitlab::Utils::StrongMemoize
include Langsmith::RunHelpers
include ::Gitlab::Llm::Concerns::Logger
# Raised when the agent requests a tool unknown to GitLab Rails.
ToolNotFoundError = Class.new(StandardError)
# Raised when a step produces no events at all.
EmptyEventsError = Class.new(StandardError)
# Raised when MAX_ITERATIONS steps elapse without a final answer.
ExhaustedLoopError = Class.new(StandardError)
# Raised when the agent event stream yields an Error event.
AgentEventError = Class.new(StandardError)
attr_reader :tools, :user_input, :context, :response_handler
attr_accessor :iterations
# Upper bound on reason/act cycles before raising ExhaustedLoopError.
MAX_ITERATIONS = 10
# @param [String] user_input - a question from a user
# @param [Array<Tool>] tools - an array of Tools defined in the tools module.
# @param [GitlabContext] context - Gitlab context containing useful context information
# @param [ResponseService] response_handler - Handles returning the response to the client
# @param [ResponseService] stream_response_handler - Handles streaming chunks to the client
def initialize(user_input:, tools:, context:, response_handler:, stream_response_handler: nil)
@user_input = user_input
@tools = tools
@context = context
@iterations = 0
@response_handler = response_handler
@stream_response_handler = stream_response_handler
end
# Runs the ReAct loop; returns an Answer, converting any StandardError
# into an error Answer after tracking it.
def execute
MAX_ITERATIONS.times do
events = step_forward
raise EmptyEventsError if events.empty?
answer = process_final_answer(events) ||
process_tool_action(events) ||
process_unknown(events)
return answer if answer
end
raise ExhaustedLoopError
rescue StandardError => error
Gitlab::ErrorTracking.track_exception(error)
error_answer(error)
end
traceable :execute, name: 'Run ReAct'
private
# TODO: Improve these error messages. See https://gitlab.com/gitlab-org/gitlab/-/issues/479465
# TODO Handle ForbiddenError, ClientError, ServerError.
# Maps a raised error to an error Answer with a stable error code
# (A1000..A1006, M3006; A9999 as the catch-all).
def error_answer(error)
case error
when Net::ReadTimeout
Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, I couldn't respond in time. Please try again."),
source: "chat_v2",
error_code: "A1000"
)
when Gitlab::Llm::AiGateway::Client::ConnectionError
Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A1001"
)
when EmptyEventsError
Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, I couldn't respond in time. Please try again."),
source: "chat_v2",
error_code: "A1002"
)
when EOFError
Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A1003"
)
when AgentEventError
# Matched on the upstream error text; other agent errors get A1004.
if error.message.present? && error.message.include?("prompt is too long")
Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, you've entered too many prompts. Please run /clear " \
"or /reset before asking the next question."),
source: "chat_v2",
error_code: "A1005"
)
else
Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A1004"
)
end
when ExhaustedLoopError
Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, Duo Chat agent reached the limit before finding an answer for your question. " \
"Please try a different prompt or clear your conversation history with /clear."),
source: "chat_v2",
error_code: "A1006"
)
when Gitlab::AiGateway::ForbiddenError
Answer.error_answer(
error: error,
context: context,
content: _("I'm sorry, you don't have the GitLab Duo subscription required " \
"to use Duo Chat. Please contact your administrator."),
source: "chat_v2",
error_code: "M3006"
)
else
Answer.error_answer(
error: error,
context: context,
source: "chat_v2",
error_code: "A9999"
)
end
end
# Joins all FinalAnswerDelta texts into one final Answer, or nil.
def process_final_answer(events)
events = events.select { |e| e.instance_of? Gitlab::Duo::Chat::AgentEvents::FinalAnswerDelta }
return if events.empty?
content = events.map(&:text).join("")
Answer.final_answer(context: context, content: content)
end
# Executes the tool named by the first Action event; returns its answer
# if final, otherwise records the observation and returns nil.
def process_tool_action(events)
event = events.find { |e| e.instance_of? Gitlab::Duo::Chat::AgentEvents::Action }
return unless event
tool_class = get_tool_class(event.tool)
tool = tool_class.new(
context: context,
options: {
input: user_input,
suggestions: event.thought
},
stream_response_handler: stream_response_handler
)
tool_answer = tool.execute
return tool_answer if tool_answer.is_final?
step_executor.update_observation(tool_answer.content.strip)
nil
end
# Surfaces an Unknown event's text as the final answer (with a warning).
def process_unknown(events)
event = events.find { |e| e.instance_of? Gitlab::Duo::Chat::AgentEvents::Unknown }
return unless event
log_warn(message: "Surface an unknown event as a final answer to the user",
event_name: 'unknown_event',
ai_component: 'duo_chat')
Answer.final_answer(context: context, content: event.text)
end
# Memoized per-execution agent stepper.
def step_executor
@step_executor ||= Gitlab::Duo::Chat::StepExecutor.new(context.current_user)
end
# Performs one agent step, streaming FinalAnswerDelta chunks to the
# client when a stream handler is configured; raises AgentEventError on
# an Error event. Returns the step's events.
def step_forward
streamed_answer = Gitlab::Llm::Chain::StreamedAnswer.new
step_executor.step(step_params) do |event|
raise AgentEventError, event.message if event.instance_of? Gitlab::Duo::Chat::AgentEvents::Error
next unless stream_response_handler
next unless event.instance_of? Gitlab::Duo::Chat::AgentEvents::FinalAnswerDelta
chunk = streamed_answer.next_chunk(event.text)
next unless chunk
stream_response_handler.execute(
response: Gitlab::Llm::Chain::StreamedResponseModifier
.new(chunk[:content], chunk_id: chunk[:id]),
options: { chunk_id: chunk[:id] }
)
end
end
# Request payload for one agent step.
def step_params
{
messages: messages,
model_metadata: model_metadata_params,
unavailable_resources: unavailable_resources_params
}
end
# Conversation history plus the current user turn and its context.
def messages
conversation.append(
{
role: "user",
content: user_input,
context: current_resource_params,
current_file: current_file_params,
additional_context: context.additional_context
}
)
end
# Resolves the agent-supplied tool name to its Executor class.
def get_tool_class(tool)
tool_name = tool.camelize
tool_class = tools.find { |tool_class| tool_class::Executor::NAME == tool_name }
unless tool_class
# Make sure that the v2/chat/agent endpoint in AI Gateway and the GitLab-Rails are compatible.
log_error(message: "Failed to find a tool in GitLab Rails",
event_name: 'tool_not_find',
ai_component: 'duo_chat',
tool_name: tool)
# NOTE(review): raise's second argument is a Hash, so the exception
# message is the Hash's #to_s — confirm a string wasn't intended.
raise ToolNotFoundError, tool: tool_name
end
tool_class::Executor
end
# Resource types the agent must not claim it can access.
def unavailable_resources_params
%w[Pipelines Vulnerabilities]
end
attr_reader :stream_response_handler
# Model routing info for self-hosted models; nil otherwise.
def model_metadata_params
return unless chat_feature_setting&.self_hosted?
self_hosted_model = chat_feature_setting.self_hosted_model
{
provider: :openai, # for self-hosted models we support Messages API format at the moment
name: self_hosted_model.model,
endpoint: self_hosted_model.endpoint,
api_key: self_hosted_model.api_token,
identifier: self_hosted_model.identifier
}
end
# Prior chat turns for this user, truncated to fit the context window.
def conversation
Utils::ChatConversation.new(context.current_user)
.truncated_conversation_list
end
# Type/content of the page the user is viewing, or nil when unknown.
def current_resource_params
return unless current_resource_type
{
type: current_resource_type,
content: current_resource_content
}
end
def current_resource_type
context.current_page_type
rescue ArgumentError
# ArgumentError signals an unsupported page type; treat as "no resource".
nil
end
strong_memoize_attr :current_resource_type
def current_resource_content
context.current_page_short_description
rescue ArgumentError
nil
end
strong_memoize_attr :current_resource_content
# Current file payload: prefers the user's selection over the viewed
# blob; nil when neither is available.
def current_file_params
return unless current_selection || current_blob
if current_selection
file_path = current_selection[:file_name]
data = current_selection[:selected_text]
else
file_path = current_blob.path
data = current_blob.data
end
{
file_path: file_path,
data: data,
selected_code: !!current_selection
}
end
# The current_file context hash, but only when it has selected text.
def current_selection
return unless context.current_file[:selected_text].present?
context.current_file
end
strong_memoize_attr :current_selection
def current_blob
context.extra_resource[:blob]
end
strong_memoize_attr :current_blob
def chat_feature_setting
::Ai::FeatureSetting.find_by_feature(:duo_chat)
end
end
end
end
end
end
# frozen_string_literal: true
 
# Deprecation: this executor will be removed in favor of SingleActionExecutor
# Deprecation: this executor will be removed in favor of ReactExecutor
# see https://gitlab.com/gitlab-org/gitlab/-/issues/469087
 
module Gitlab
Loading
Loading
# frozen_string_literal: true
 
# Deprecation: SingleActionExecutor doesn't use this modifier
# Deprecation: ReactExecutor doesn't use this modifier
# as picked_tool_action method isn't used anymore.
# This class will be removed alongside ZeroShot::Executor
# see https://gitlab.com/gitlab-org/gitlab/-/issues/469087
Loading
Loading
Loading
Loading
@@ -130,7 +130,7 @@ def execute_with_tool_chosen_by_ai(response_handler, stream_response_handler)
if Feature.enabled?(:v2_chat_agent_integration, user) &&
Feature.disabled?(:v2_chat_agent_integration_override, user)
 
Gitlab::Llm::Chain::Agents::SingleActionExecutor.new(
Gitlab::Duo::Chat::ReactExecutor.new(
user_input: prompt_message.content,
tools: tools,
context: context,
Loading
Loading
Loading
Loading
@@ -2,7 +2,7 @@
 
require 'spec_helper'
 
RSpec.describe Gitlab::Llm::Chain::Agents::SingleActionExecutor, feature_category: :duo_chat do
RSpec.describe Gitlab::Duo::Chat::ReactExecutor, feature_category: :duo_chat do
include FakeBlobHelpers
 
describe "#execute" do
Loading
Loading
Loading
Loading
@@ -98,7 +98,7 @@
stream_response_handler: stream_response_handler
]
 
expect_next_instance_of(::Gitlab::Llm::Chain::Agents::SingleActionExecutor, *expected_params) do |instance|
expect_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor, *expected_params) do |instance|
expect(instance).to receive(:execute).and_return(answer)
end
 
Loading
Loading
@@ -145,7 +145,7 @@
stream_response_handler: stream_response_handler
]
 
expect_next_instance_of(::Gitlab::Llm::Chain::Agents::SingleActionExecutor, *expected_params) do |instance|
expect_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor, *expected_params) do |instance|
expect(instance).to receive(:execute).and_return(answer)
end
 
Loading
Loading
@@ -176,7 +176,7 @@
end
 
it 'sends process_gitlab_duo_question snowplow event with value eql 0' do
allow_next_instance_of(::Gitlab::Llm::Chain::Agents::SingleActionExecutor) do |instance|
allow_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor) do |instance|
expect(instance).to receive(:execute).and_return(answer)
end
 
Loading
Loading
@@ -249,7 +249,7 @@
stream_response_handler: stream_response_handler
]
 
expect_next_instance_of(::Gitlab::Llm::Chain::Agents::SingleActionExecutor, *expected_params) do |instance|
expect_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor, *expected_params) do |instance|
expect(instance).to receive(:execute).and_return(answer)
end
expect(response_handler).to receive(:execute)
Loading
Loading
@@ -346,7 +346,7 @@
command: an_instance_of(::Gitlab::Llm::Chain::SlashCommand)
}
 
expect(::Gitlab::Llm::Chain::Agents::SingleActionExecutor).not_to receive(:new)
expect(::Gitlab::Duo::Chat::ReactExecutor).not_to receive(:new)
expect(expected_tool)
.to receive(:new).with(expected_params).and_return(executor)
 
Loading
Loading
@@ -416,7 +416,7 @@
let(:command) { '/explain2' }
 
it 'process the message with zero shot agent' do
expect_next_instance_of(::Gitlab::Llm::Chain::Agents::SingleActionExecutor) do |instance|
expect_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor) do |instance|
expect(instance).to receive(:execute).and_return(answer)
end
expect(::Gitlab::Llm::Chain::Tools::ExplainCode::Executor).not_to receive(:new)
Loading
Loading
@@ -440,7 +440,7 @@
stream_response_handler: stream_response_handler
]
 
allow_next_instance_of(::Gitlab::Llm::Chain::Agents::SingleActionExecutor, *expected_params) do |instance|
allow_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor, *expected_params) do |instance|
allow(instance).to receive(:execute).and_return(answer)
end
 
Loading
Loading
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment