Unverified commit 601d558b, authored by pshutsin, committed by Nicolas Dular

Rename Llm::Cache classes

Renames `Llm::Cache` to `ChatStorage` and `Llm::CachedMessage` to `ChatMessage`.
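
For orientation, a rough before/after sketch of the rename as it plays out in the hunks below (a sketch only; `user` and the message payloads are illustrative values):

    # Before: Llm::Cache owned both the Redis-backed history and the ROLE_*
    # constants, and Llm::CachedMessage wrapped a single stored message.
    Gitlab::Llm::Cache.new(user).find_all(roles: ['user'])
    Gitlab::Llm::Cache::ROLE_ASSISTANT # => 'assistant'
    Gitlab::Llm::CachedMessage.new('content' => 'hi', 'timestamp' => '')

    # After: ChatStorage handles storage access, ChatMessage carries a single
    # message plus ROLE_* and ALLOWED_ROLES, and find_all becomes messages.
    Gitlab::Llm::ChatStorage.new(user).messages(roles: ['user'])
    Gitlab::Llm::ChatMessage::ROLE_ASSISTANT # => 'assistant'
    Gitlab::Llm::ChatMessage.new('content' => 'hi', 'timestamp' => '')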
parent 2a99cba4
Showing 80 additions and 75 deletions
@@ -16,7 +16,7 @@ class MessagesResolver < BaseResolver
       def resolve(**args)
         return [] unless current_user

-        ::Gitlab::Llm::Cache.new(current_user).find_all(args)
+        ::Gitlab::Llm::ChatStorage.new(current_user).messages(args)
       end
     end
   end
@@ -6,7 +6,7 @@ class ChatMessageRoleEnum < BaseEnum
     graphql_name 'AiChatMessageRole'
     description 'Roles to filter in chat message.'

-    ::Gitlab::Llm::Cache::ALLOWED_ROLES.each do |role|
+    ::Gitlab::Llm::ChatMessage::ALLOWED_ROLES.each do |role|
       value role.upcase, description: "Filter only #{role} messages.", value: role
     end
   end
@@ -40,12 +40,12 @@ def worker_perform(user, resource, action_name, options)
       message = content(action_name)
       payload = {
         request_id: request_id,
-        role: ::Gitlab::Llm::Cache::ROLE_USER,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_USER,
         content: message,
         timestamp: Time.current
       }

-      ::Gitlab::Llm::Cache.new(user).add(payload) if cache_response?(options)
+      ::Gitlab::Llm::ChatStorage.new(user).add(payload) if cache_response?(options)

       if emit_response?(options)
         # We do not add the `client_subscription_id` here on purpose for now.
@@ -106,7 +106,7 @@ def content(action_name)
       end

       def no_worker_message?(content)
-        content == ::Gitlab::Llm::CachedMessage::RESET_MESSAGE
+        content == ::Gitlab::Llm::ChatMessage::RESET_MESSAGE
       end

       def cache_response?(options)
@@ -92,7 +92,7 @@ def execute_streamed_request
           response: Gitlab::Llm::Chain::PlainResponseModifier.new(content),
           options: {
             cache_response: false,
-            role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+            role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
             chunk_id: chunk[:id]
           }
         )
@@ -134,7 +134,8 @@ def picked_tool_action(tool_class)

         response_handler.execute(
           response: Gitlab::Llm::Chain::ToolResponseModifier.new(tool_class),
-          options: { cache_response: false, role: ::Gitlab::Llm::Cache::ROLE_SYSTEM, type: RESPONSE_TYPE_TOOL }
+          options: { cache_response: false, role: ::Gitlab::Llm::ChatMessage::ROLE_SYSTEM,
+            type: RESPONSE_TYPE_TOOL }
         )
       end

@@ -143,7 +144,7 @@ def prompt_version
       end

       def last_conversation
-        Cache.new(context.current_user).last_conversation
+        ChatStorage.new(context.current_user).last_conversation
       end
       strong_memoize_attr :last_conversation

@@ -7,14 +7,14 @@ module Agents
     module ZeroShot
       module Prompts
         ROLE_NAMES = {
-          Llm::Cache::ROLE_USER => 'Human',
-          Llm::Cache::ROLE_ASSISTANT => 'Assistant'
+          Llm::ChatMessage::ROLE_USER => 'Human',
+          Llm::ChatMessage::ROLE_ASSISTANT => 'Assistant'
         }.freeze

         class Anthropic < Base
           def self.prompt(options)
             text = <<~PROMPT
-              #{ROLE_NAMES[Llm::Cache::ROLE_USER]}: #{base_prompt(options)}
+              #{ROLE_NAMES[Llm::ChatMessage::ROLE_USER]}: #{base_prompt(options)}
             PROMPT

             history = truncated_conversation(options[:conversation], Requests::Anthropic::PROMPT_SIZE - text.size)
@@ -2,7 +2,12 @@

 module Gitlab
   module Llm
-    class CachedMessage
+    class ChatMessage
+      ROLE_USER = 'user'
+      ROLE_ASSISTANT = 'assistant'
+      ROLE_SYSTEM = 'system'
+      ALLOWED_ROLES = [ROLE_USER, ROLE_ASSISTANT, ROLE_SYSTEM].freeze
+
       attr_reader :id, :request_id, :content, :role, :timestamp, :error

       RESET_MESSAGE = '/reset'
@@ -2,7 +2,7 @@

 module Gitlab
   module Llm
-    class Cache
+    class ChatStorage
       # Expiration time of user messages should not be more than 90 days.
       # EXPIRE_TIME sets expiration time for the whole chat history stream (not
       # for individual messages) - so the stream is deleted after 3 days since
@@ -19,17 +19,14 @@ class Cache
       # sufficient.
       MAX_TEXT_LIMIT = 20_000

-      ROLE_USER = 'user'
-      ROLE_ASSISTANT = 'assistant'
-      ROLE_SYSTEM = 'system'
-      ALLOWED_ROLES = [ROLE_USER, ROLE_ASSISTANT, ROLE_SYSTEM].freeze
-
       def initialize(user)
         @user = user
       end

       def add(payload)
-        raise ArgumentError, "Invalid role '#{payload[:role]}'" unless ALLOWED_ROLES.include?(payload[:role])
+        unless ChatMessage::ALLOWED_ROLES.include?(payload[:role])
+          raise ArgumentError, "Invalid role '#{payload[:role]}'"
+        end

         data = {
           id: SecureRandom.uuid,
@@ -43,16 +40,16 @@ def add(payload)
         cache_data(data)
       end

-      def find_all(filters = {})
+      def messages(filters = {})
         with_redis do |redis|
           redis.xrange(key).filter_map do |_id, data|
-            CachedMessage.new(data) if matches_filters?(data, filters)
+            ChatMessage.new(data) if matches_filters?(data, filters)
           end
         end
       end

       def last_conversation
-        all = find_all
+        all = messages
         idx = all.rindex(&:conversation_reset?)
         return all unless idx
         return [] unless idx + 1 < all.size
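
Taken together, the storage hunks above leave `ChatStorage` with this surface (a usage sketch inferred from the diff; the payload values are illustrative):

    storage = Gitlab::Llm::ChatStorage.new(user)

    # add validates the role against ChatMessage::ALLOWED_ROLES, then caches.
    storage.add(request_id: 'uuid1', role: 'user', content: 'question 1', timestamp: Time.current)

    # messages (formerly find_all) returns ChatMessage objects, optionally filtered.
    storage.messages(roles: ['user']).map(&:content) # => ['question 1']

    # last_conversation returns only the messages recorded after the latest '/reset'.
    storage.last_conversation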
@@ -20,7 +20,7 @@ def execute
         model_name: resource&.class&.name,
         content: response_modifier.response_body,
         errors: response_modifier.errors,
-        role: options[:role] || Cache::ROLE_ASSISTANT,
+        role: options[:role] || ChatMessage::ROLE_ASSISTANT,
         timestamp: Time.current,
         type: options.fetch(:type, nil),
         chunk_id: options.fetch(:chunk_id, nil)
@@ -35,7 +35,7 @@ def execute
       response_data = data.slice(:request_id, :errors, :role, :content, :timestamp)

       unless options[:internal_request]
-        Gitlab::Llm::Cache.new(user).add(response_data) if options[:cache_response]
+        Gitlab::Llm::ChatStorage.new(user).add(response_data) if options[:cache_response]

         subscription_arguments = { user_id: user.to_global_id, resource_id: resource&.to_global_id }
         if options[:client_subscription_id]
@@ -16,10 +16,10 @@
   describe '#content_html' do
     let_it_be(:current_user) { create(:user) }

-    let(:cached_message) { Gitlab::Llm::CachedMessage.new('content' => "Hello, **World**!", 'timestamp' => '') }
+    let(:message) { Gitlab::Llm::ChatMessage.new('content' => "Hello, **World**!", 'timestamp' => '') }

     it 'renders html through Banzai' do
-      allow(Banzai).to receive(:render_and_post_process).with(cached_message.content, {
+      allow(Banzai).to receive(:render_and_post_process).with(message.content, {
         current_user: current_user,
         only_path: false,
         pipeline: :full,
@@ -27,7 +27,7 @@
         skip_project_check: true
       }).and_return('banzai_content')

-      resolved_field = resolve_field(:content_html, cached_message, current_user: current_user)
+      resolved_field = resolve_field(:content_html, message, current_user: current_user)

       expect(resolved_field).to eq('banzai_content')
     end
@@ -151,11 +151,11 @@

       expect(stream_response_service_double).to receive(:execute).with(
         response: first_response_double,
-        options: { cache_response: false, role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT, chunk_id: 1 }
+        options: { cache_response: false, role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT, chunk_id: 1 }
       )
       expect(stream_response_service_double).to receive(:execute).with(
         response: second_response_double,
-        options: { cache_response: false, role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT, chunk_id: 2 }
+        options: { cache_response: false, role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT, chunk_id: 2 }
       )

       agent.execute
@@ -168,26 +168,26 @@
       allow(agent).to receive(:provider_prompt_class)
         .and_return(Gitlab::Llm::Chain::Agents::ZeroShot::Prompts::Anthropic)

-      Gitlab::Llm::Cache.new(user).add(request_id: 'uuid1', role: 'user', content: 'question 1')
-      Gitlab::Llm::Cache.new(user).add(request_id: 'uuid1', role: 'assistant', content: 'response 1')
+      Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid1', role: 'user', content: 'question 1')
+      Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid1', role: 'assistant', content: 'response 1')
       # this should be ignored because response contains an error
-      Gitlab::Llm::Cache.new(user).add(request_id: 'uuid2', role: 'user', content: 'question 2')
-      Gitlab::Llm::Cache.new(user)
+      Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid2', role: 'user', content: 'question 2')
+      Gitlab::Llm::ChatStorage.new(user)
         .add(request_id: 'uuid2', role: 'assistant', content: 'response 2', errors: ['error'])
       # this should be ignored because it doesn't contain response
-      Gitlab::Llm::Cache.new(user).add(request_id: 'uuid3', role: 'user', content: 'question 3')
+      Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid3', role: 'user', content: 'question 3')

       travel(2.minutes) do
-        Gitlab::Llm::Cache.new(user).add(request_id: 'uuid4', role: 'user', content: 'question 4')
+        Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid4', role: 'user', content: 'question 4')
       end
       travel(2.minutes) do
-        Gitlab::Llm::Cache.new(user).add(request_id: 'uuid5', role: 'user', content: 'question 5')
+        Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid5', role: 'user', content: 'question 5')
       end
       travel(3.minutes) do
-        Gitlab::Llm::Cache.new(user).add(request_id: 'uuid4', role: 'assistant', content: 'response 4')
+        Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid4', role: 'assistant', content: 'response 4')
       end
       travel(4.minutes) do
-        Gitlab::Llm::Cache.new(user).add(request_id: 'uuid5', role: 'assistant', content: 'response 5')
+        Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid5', role: 'assistant', content: 'response 5')
       end
     end

@@ -414,7 +414,8 @@
       uuid = SecureRandom.uuid

       history.each do |message|
-        Gitlab::Llm::Cache.new(user).add({ request_id: uuid, role: message[:role], content: message[:content] })
+        Gitlab::Llm::ChatStorage.new(user).add({ request_id: uuid, role: message[:role],
+          content: message[:content] })
       end
     end

@@ -542,7 +543,8 @@
       uuid = SecureRandom.uuid

       history.each do |message|
-        Gitlab::Llm::Cache.new(user).add({ request_id: uuid, role: message[:role], content: message[:content] })
+        Gitlab::Llm::ChatStorage.new(user).add({ request_id: uuid, role: message[:role],
+          content: message[:content] })
       end
     end

@@ -12,16 +12,16 @@
       user_input: 'foo?',
       agent_scratchpad: "some observation",
       conversation: [
-        Gitlab::Llm::CachedMessage.new(
+        Gitlab::Llm::ChatMessage.new(
           'request_id' => 'uuid1', 'role' => 'user', 'content' => 'question 1', 'timestamp' => Time.current.to_s
         ),
-        Gitlab::Llm::CachedMessage.new(
+        Gitlab::Llm::ChatMessage.new(
           'request_id' => 'uuid1', 'role' => 'assistant', 'content' => 'response 1', 'timestamp' => Time.current.to_s
         ),
-        Gitlab::Llm::CachedMessage.new(
+        Gitlab::Llm::ChatMessage.new(
           'request_id' => 'uuid1', 'role' => 'user', 'content' => 'question 2', 'timestamp' => Time.current.to_s
         ),
-        Gitlab::Llm::CachedMessage.new(
+        Gitlab::Llm::ChatMessage.new(
           'request_id' => 'uuid1', 'role' => 'assistant', 'content' => 'response 2', 'timestamp' => Time.current.to_s
         )
       ],
@@ -2,7 +2,7 @@

 require 'spec_helper'

-RSpec.describe Gitlab::Llm::CachedMessage, feature_category: :duo_chat do
+RSpec.describe Gitlab::Llm::ChatMessage, feature_category: :duo_chat do
   let(:timestamp) { Time.current }
   let(:data) do
     {
@@ -19,7 +19,7 @@

   describe '#to_global_id' do
     it 'returns global ID' do
-      expect(subject.to_global_id.to_s).to eq('gid://gitlab/Gitlab::Llm::CachedMessage/uuid')
+      expect(subject.to_global_id.to_s).to eq('gid://gitlab/Gitlab::Llm::ChatMessage/uuid')
     end
   end

@@ -2,7 +2,7 @@

 require 'spec_helper'

-RSpec.describe Gitlab::Llm::Cache, :clean_gitlab_redis_chat, feature_category: :duo_chat do
+RSpec.describe Gitlab::Llm::ChatStorage, :clean_gitlab_redis_chat, feature_category: :duo_chat do
   let_it_be(:user) { create(:user) }
   let(:request_id) { 'uuid' }
   let(:timestamp) { Time.current.to_s }
@@ -30,11 +30,11 @@
       uuid = 'unique_id'

       expect(SecureRandom).to receive(:uuid).once.and_return(uuid)
-      expect(subject.find_all).to be_empty
+      expect(subject.messages).to be_empty

       subject.add(payload)

-      last = subject.find_all.last
+      last = subject.messages.last
       expect(last.id).to eq(uuid)
       expect(last.request_id).to eq(request_id)
       expect(last.errors).to eq(['some error1. another error'])
@@ -48,7 +48,7 @@

       subject.add(payload)

-      last = subject.find_all.last
+      last = subject.messages.last
       expect(last.errors).to eq([])
     end

@@ -66,23 +66,23 @@

     context 'with MAX_MESSAGES limit' do
       before do
-        stub_const('Gitlab::Llm::Cache::MAX_MESSAGES', 2)
+        stub_const('Gitlab::Llm::ChatStorage::MAX_MESSAGES', 2)
       end

       it 'removes oldes messages if we reach maximum message limit' do
         subject.add(payload.merge(content: 'msg1'))
         subject.add(payload.merge(content: 'msg2'))

-        expect(subject.find_all.map(&:content)).to eq(%w[msg1 msg2])
+        expect(subject.messages.map(&:content)).to eq(%w[msg1 msg2])

         subject.add(payload.merge(content: 'msg3'))

-        expect(subject.find_all.map(&:content)).to eq(%w[msg2 msg3])
+        expect(subject.messages.map(&:content)).to eq(%w[msg2 msg3])
       end
     end
   end

-  describe '#find_all' do
+  describe '#messages' do
     let(:filters) { {} }

     before do
@@ -92,14 +92,14 @@
     end

     it 'returns all records for this user' do
-      expect(subject.find_all(filters).map(&:content)).to eq(%w[msg1 msg2 msg3])
+      expect(subject.messages(filters).map(&:content)).to eq(%w[msg1 msg2 msg3])
     end

     context 'when filtering by role' do
       let(:filters) { { roles: ['user'] } }

       it 'returns only records for this role' do
-        expect(subject.find_all(filters).map(&:content)).to eq(%w[msg1])
+        expect(subject.messages(filters).map(&:content)).to eq(%w[msg1])
       end
     end

@@ -107,7 +107,7 @@
       let(:filters) { { request_ids: %w[2 3] } }

       it 'returns only records with the same request_id' do
-        expect(subject.find_all(filters).map(&:content)).to eq(%w[msg2 msg3])
+        expect(subject.messages(filters).map(&:content)).to eq(%w[msg2 msg3])
       end
     end
   end
@@ -78,7 +78,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: '',
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: [described_class::NULL_PROMPT_ERROR]
       }))
@@ -103,7 +103,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: '',
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: errors[llm_client]
       }))
@@ -133,7 +133,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: example_answer,
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: []
       }))
@@ -162,7 +162,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: '',
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: [described_class::DEFAULT_ERROR]
       }))
@@ -192,7 +192,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: '',
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: [described_class::CLIENT_TIMEOUT_ERROR]
       }))
@@ -219,7 +219,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: example_answer,
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: []
       })).twice
@@ -251,7 +251,7 @@
         id: anything,
         model_name: vulnerability.class.name,
         content: example_answer,
-        role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+        role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
         request_id: 'uuid',
         errors: []
       })
@@ -86,7 +86,7 @@
       let(:cache_response) { true }

       it 'caches response' do
-        expect_next_instance_of(::Gitlab::Llm::Cache) do |cache|
+        expect_next_instance_of(::Gitlab::Llm::ChatStorage) do |cache|
           expect(cache).to receive(:add)
             .with(payload.slice(:request_id, :errors, :role, :timestamp).merge(content: payload[:content]))
         end
@@ -99,7 +99,7 @@
       let(:cache_response) { false }

       it 'does not cache the response' do
-        expect(Gitlab::Llm::Cache).not_to receive(:new)
+        expect(Gitlab::Llm::ChatStorage).not_to receive(:new)

         subject
       end
@@ -151,7 +151,7 @@

       it 'returns response but does not cache or broadcast' do
         expect(GraphqlTriggers).not_to receive(:ai_completion_response)
-        expect(Gitlab::Llm::Cache).not_to receive(:new)
+        expect(Gitlab::Llm::ChatStorage).not_to receive(:new)

         expect(subject[:content]).to eq(response_body)
       end
@@ -30,7 +30,7 @@
     end

     it 'returns :unknwon for other classes' do
-      expect(described_class.client_label(Gitlab::Llm::Cache)).to eq(:unknown)
+      expect(described_class.client_label(Gitlab::Llm::ChatStorage)).to eq(:unknown)
     end
   end
 end
@@ -36,10 +36,10 @@
   subject { graphql_data.dig('aiMessages', 'nodes') }

   before do
-    ::Gitlab::Llm::Cache.new(user).add(request_id: 'uuid1', role: 'user', content: 'question 1')
-    ::Gitlab::Llm::Cache.new(user).add(request_id: 'uuid1', role: 'assistant', content: response_content)
+    ::Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid1', role: 'user', content: 'question 1')
+    ::Gitlab::Llm::ChatStorage.new(user).add(request_id: 'uuid1', role: 'assistant', content: response_content)
     # should not be included in response because it's for other user
-    ::Gitlab::Llm::Cache.new(other_user).add(request_id: 'uuid1', role: 'user', content: 'question 2')
+    ::Gitlab::Llm::ChatStorage.new(other_user).add(request_id: 'uuid1', role: 'user', content: 'question 2')
   end

   context 'when user is not logged in' do
@@ -41,7 +41,7 @@
       model_name: resource.class.name,
       request_id: request_id,
       content: content,
-      role: ::Gitlab::Llm::Cache::ROLE_ASSISTANT,
+      role: ::Gitlab::Llm::ChatMessage::ROLE_ASSISTANT,
       errors: [],
       chunk_id: nil
     }
@@ -15,7 +15,7 @@
   end

   it 'caches response' do
-    expect_next_instance_of(::Gitlab::Llm::Cache) do |cache|
+    expect_next_instance_of(::Gitlab::Llm::ChatStorage) do |cache|
       expect(cache).to receive(:add).with(expected_cache_payload)
     end

@@ -32,7 +32,7 @@
   it 'only stores the message in cache' do
     expect(::Llm::CompletionWorker).not_to receive(:perform_async)

-    expect_next_instance_of(::Gitlab::Llm::Cache) do |cache|
+    expect_next_instance_of(::Gitlab::Llm::ChatStorage) do |cache|
       expect(cache).to receive(:add).with(expected_cache_payload)
     end

@@ -43,7 +43,7 @@

 RSpec.shared_examples 'llm service does not cache user request' do
   it 'does not cache the request' do
-    expect(::Gitlab::Llm::Cache).not_to receive(:new)
+    expect(::Gitlab::Llm::ChatStorage).not_to receive(:new)

     subject.execute
   end