Commit 0d458b96 authored by Micael Bergeron

remove geo specific code

parent 0e732fa4
Showing 76 additions and 1493 deletions
class Projects::JobsController < Projects::ApplicationController
prepend EE::Projects::JobsController
before_action :build, except: [:index, :cancel_all]
 
before_action :authorize_read_build!,
@@ -119,11 +117,17 @@ class Projects::JobsController < Projects::ApplicationController
end
 
  def raw
-    build.trace.read do |stream|
-      if stream.file?
-        send_file stream.path, type: 'text/plain; charset=utf-8', disposition: 'inline'
-      else
-        render_404
+    if trace_artifact_file
+      send_upload(trace_artifact_file,
+                  send_params: raw_send_params,
+                  redirect_params: raw_redirect_params)
+    else
+      build.trace.read do |stream|
+        if stream.file?
+          send_file stream.path, type: 'text/plain; charset=utf-8', disposition: 'inline'
+        else
+          render_404
+        end
      end
    end
  end
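For context on the new branch: `send_upload` is the shared controller helper that either serves a locally stored upload or redirects to its object-storage URL. A minimal sketch of that dispatch, assuming this helper signature (illustrative, not this commit's code):

```ruby
# Sketch of a send_upload helper: serve local files directly, redirect to
# object storage otherwise. The raw action above supplies raw_send_params
# for the local branch and raw_redirect_params for the redirect branch.
def send_upload(file_upload, send_params: {}, redirect_params: {})
  if file_upload.file_storage?
    # Local disk: stream the file with the given content type/disposition.
    send_file file_upload.path, send_params
  else
    # Object storage: redirect to the file's URL, forwarding query params
    # such as response-content-type.
    redirect_to file_upload.url(**redirect_params)
  end
end
```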
@@ -138,9 +142,21 @@ class Projects::JobsController < Projects::ApplicationController
return access_denied! unless can?(current_user, :erase_build, build)
end
 
+  def raw_send_params
+    { type: 'text/plain; charset=utf-8', disposition: 'inline' }
+  end
+
+  def raw_redirect_params
+    { query: { 'response-content-type' => 'text/plain; charset=utf-8', 'response-content-disposition' => 'inline' } }
+  end
+
+  def trace_artifact_file
+    @trace_artifact_file ||= build.job_artifacts_trace&.file
+  end
+
  def build
    @build ||= project.builds.find(params[:id])
      .present(current_user: current_user)
  end
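One small behavioral note on `trace_artifact_file`: `||=` only memoizes truthy values, so when a build has no trace artifact the `job_artifacts_trace&.file` lookup repeats on every call. A hypothetical variant that also caches the nil result:

```ruby
def trace_artifact_file
  return @trace_artifact_file if defined?(@trace_artifact_file)

  # Caches nil as well, so builds without a trace artifact don't re-query.
  @trace_artifact_file = build.job_artifacts_trace&.file
end
```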
 
def build_path(build)
@@ -15,7 +15,7 @@ class LfsObject < ActiveRecord::Base
 
  after_save if: :file_changed?, on: [:create, :update] do
    run_after_commit do
-      file.schedule_migration_to_object_storage
+      file.schedule_background_upload
    end
  end
 
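The hook now calls `schedule_background_upload`, which `ObjectStorage::Concern` is expected to provide. A sketch of what such a method plausibly does, assuming a background move worker along the lines of `ObjectStorage::BackgroundMoveWorker` (the guard names are assumptions, not confirmed by this diff):

```ruby
# Sketch: enqueue a move to object storage after commit, but only when the
# file is still on local disk and background uploads are enabled.
def schedule_background_upload(*args)
  return unless self.class.object_store_enabled?       # assumed guard
  return unless self.class.background_upload_enabled?  # assumed guard
  return unless file_storage? # already remote: nothing to move

  ObjectStorage::BackgroundMoveWorker.perform_async(
    self.class.name, model.class.name, mounted_as, model.id
  )
end
```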
class JobArtifactUploader < GitlabUploader
prepend EE::JobArtifactUploader
extend Workhorse::UploadPath
include ObjectStorage::Concern
 
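With `ObjectStorage::Concern` included, the uploader gains a `Store` enum and an explicit migration API; the specs further down exercise it roughly like this (a usage sketch):

```ruby
artifact = create(:ci_job_artifact, :archive)

artifact.file.file_storage?   # => true; the file starts in Store::LOCAL
artifact.file.migrate!(JobArtifactUploader::Store::REMOTE)
artifact.file.file_storage?   # => false; the file now lives in object storage
```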
@@ -11,7 +11,7 @@
#
# It's strongly recommended that you check this file into your version control system.
 
-ActiveRecord::Schema.define(version: 20180213131630) do
+ActiveRecord::Schema.define(version: 20180216121030) do
 
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
require 'spec_helper'
describe Geo::AttachmentRegistryFinder, :geo do
include ::EE::GeoHelpers
let(:secondary) { create(:geo_node) }
let(:synced_group) { create(:group) }
let(:synced_subgroup) { create(:group, parent: synced_group) }
let(:unsynced_group) { create(:group) }
let(:synced_project) { create(:project, group: synced_group) }
let(:unsynced_project) { create(:project, group: unsynced_group, repository_storage: 'broken') }
let!(:upload_1) { create(:upload, model: synced_group) }
let!(:upload_2) { create(:upload, model: unsynced_group) }
let!(:upload_3) { create(:upload, :issuable_upload, model: synced_project) }
let!(:upload_4) { create(:upload, model: unsynced_project) }
let(:upload_5) { create(:upload, model: synced_project) }
let(:upload_6) { create(:upload, :personal_snippet_upload) }
let(:upload_7) { create(:upload, model: synced_subgroup) }
let(:lfs_object) { create(:lfs_object) }
subject { described_class.new(current_node: secondary) }
before do
stub_current_geo_node(secondary)
end
# Disable transactions via :delete method because a foreign table
# can't see changes inside a transaction of a different connection.
context 'FDW', :delete do
before do
skip('FDW is not configured') if Gitlab::Database.postgresql? && !Gitlab::Geo.fdw?
end
describe '#find_synced_attachments' do
it 'delegates to #fdw_find_synced_attachments' do
expect(subject).to receive(:fdw_find_synced_attachments).and_call_original
subject.find_synced_attachments
end
it 'returns synced avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_6.id)
create(:geo_file_registry, :avatar, file_id: upload_7.id)
create(:geo_file_registry, :lfs, file_id: lfs_object.id)
synced_attachments = subject.find_synced_attachments
expect(synced_attachments.pluck(:id)).to match_array([upload_1.id, upload_2.id, upload_6.id, upload_7.id])
end
context 'with selective sync' do
it 'falls back to legacy queries' do
secondary.update!(selective_sync_type: 'namespaces', namespaces: [synced_group])
expect(subject).to receive(:legacy_find_synced_attachments)
subject.find_synced_attachments
end
end
end
describe '#find_failed_attachments' do
it 'delegates to #fdw_find_failed_attachments' do
expect(subject).to receive(:fdw_find_failed_attachments).and_call_original
subject.find_failed_attachments
end
it 'returns failed avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_6.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_7.id, success: false)
create(:geo_file_registry, :lfs, file_id: lfs_object.id, success: false)
failed_attachments = subject.find_failed_attachments
expect(failed_attachments.pluck(:id)).to match_array([upload_3.id, upload_6.id, upload_7.id])
end
context 'with selective sync' do
it 'falls back to legacy queries' do
secondary.update!(selective_sync_type: 'namespaces', namespaces: [synced_group])
expect(subject).to receive(:legacy_find_failed_attachments)
subject.find_failed_attachments
end
end
end
describe '#find_unsynced_attachments' do
it 'delegates to #fdw_find_unsynced_attachments' do
expect(subject).to receive(:fdw_find_unsynced_attachments).and_call_original
subject.find_unsynced_attachments(batch_size: 10)
end
it 'returns uploads without an entry on the tracking database' do
create(:geo_file_registry, :avatar, file_id: upload_1.id, success: true)
uploads = subject.find_unsynced_attachments(batch_size: 10)
expect(uploads.map(&:id)).to match_array([upload_2.id, upload_3.id, upload_4.id])
end
it 'excludes uploads in the except_registry_ids option' do
create(:geo_file_registry, :avatar, file_id: upload_1.id, success: true)
uploads = subject.find_unsynced_attachments(batch_size: 10, except_registry_ids: [upload_2.id])
expect(uploads.map(&:id)).to match_array([upload_3.id, upload_4.id])
end
end
end
context 'Legacy' do
before do
allow(Gitlab::Geo).to receive(:fdw?).and_return(false)
end
describe '#find_synced_attachments' do
it 'delegates to #legacy_find_synced_attachments' do
expect(subject).to receive(:legacy_find_synced_attachments).and_call_original
subject.find_synced_attachments
end
it 'returns synced avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_6.id)
create(:geo_file_registry, :avatar, file_id: upload_7.id)
create(:geo_file_registry, :lfs, file_id: lfs_object.id)
synced_attachments = subject.find_synced_attachments
expect(synced_attachments).to match_array([upload_1, upload_2, upload_6, upload_7])
end
context 'with selective sync by namespace' do
it 'returns synced avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id)
create(:geo_file_registry, :avatar, file_id: upload_4.id)
create(:geo_file_registry, :avatar, file_id: upload_5.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_6.id)
create(:geo_file_registry, :avatar, file_id: upload_7.id)
create(:geo_file_registry, :lfs, file_id: lfs_object.id)
secondary.update!(selective_sync_type: 'namespaces', namespaces: [synced_group])
synced_attachments = subject.find_synced_attachments
expect(synced_attachments).to match_array([upload_1, upload_3, upload_6, upload_7])
end
end
context 'with selective sync by shard' do
it 'returns synced avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id)
create(:geo_file_registry, :avatar, file_id: upload_4.id)
create(:geo_file_registry, :avatar, file_id: upload_5.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_6.id)
create(:geo_file_registry, :avatar, file_id: upload_7.id)
create(:geo_file_registry, :lfs, file_id: lfs_object.id)
secondary.update!(selective_sync_type: 'shards', selective_sync_shards: ['default'])
synced_attachments = subject.find_synced_attachments
expect(synced_attachments).to match_array([upload_1, upload_3, upload_6])
end
end
end
describe '#find_failed_attachments' do
it 'delegates to #legacy_find_failed_attachments' do
expect(subject).to receive(:legacy_find_failed_attachments).and_call_original
subject.find_failed_attachments
end
it 'returns failed avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_6.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_7.id, success: false)
create(:geo_file_registry, :lfs, file_id: lfs_object.id, success: false)
failed_attachments = subject.find_failed_attachments
expect(failed_attachments).to match_array([upload_3, upload_6, upload_7])
end
context 'with selective sync by namespace' do
it 'returns failed avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_4.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_5.id)
create(:geo_file_registry, :avatar, file_id: upload_6.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_7.id, success: false)
create(:geo_file_registry, :lfs, file_id: lfs_object.id, success: false)
secondary.update!(selective_sync_type: 'namespaces', namespaces: [synced_group])
failed_attachments = subject.find_failed_attachments
expect(failed_attachments).to match_array([upload_1, upload_3, upload_6, upload_7])
end
end
context 'with selective sync by shard' do
it 'returns failed avatars, attachments, personal snippets and files' do
create(:geo_file_registry, :avatar, file_id: upload_1.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_2.id)
create(:geo_file_registry, :avatar, file_id: upload_3.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_4.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_5.id)
create(:geo_file_registry, :avatar, file_id: upload_6.id, success: false)
create(:geo_file_registry, :avatar, file_id: upload_7.id, success: false)
create(:geo_file_registry, :lfs, file_id: lfs_object.id, success: false)
secondary.update!(selective_sync_type: 'shards', selective_sync_shards: ['default'])
failed_attachments = subject.find_failed_attachments
expect(failed_attachments).to match_array([upload_1, upload_3, upload_6])
end
end
end
describe '#find_unsynced_attachments' do
it 'delegates to #legacy_find_unsynced_attachments' do
expect(subject).to receive(:legacy_find_unsynced_attachments).and_call_original
subject.find_unsynced_attachments(batch_size: 10)
end
it 'returns uploads without an entry on the tracking database' do
create(:geo_file_registry, :avatar, file_id: upload_1.id, success: true)
uploads = subject.find_unsynced_attachments(batch_size: 10)
expect(uploads).to match_array([upload_2, upload_3, upload_4])
end
it 'excludes uploads in the except_registry_ids option' do
create(:geo_file_registry, :avatar, file_id: upload_1.id, success: true)
uploads = subject.find_unsynced_attachments(batch_size: 10, except_registry_ids: [upload_2.id])
expect(uploads).to match_array([upload_3, upload_4])
end
end
end
end
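Taken together, the finder expectations above pin down a dispatch pattern: use the FDW-backed queries when the foreign data wrapper is usable, and fall back to the legacy queries when it is not, or when selective sync is enabled. A sketch consistent with those expectations (the guard helper is an assumption):

```ruby
def find_synced_attachments
  if use_legacy_queries?
    legacy_find_synced_attachments
  else
    fdw_find_synced_attachments
  end
end

private

# Selective sync is not supported by the FDW queries, and FDW itself may be
# unconfigured, so either condition forces the legacy path.
def use_legacy_queries?
  !Gitlab::Geo.fdw? || selective_sync?
end
```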
require 'spec_helper'
describe Gitlab::Geo::FileTransfer do
let(:user) { create(:user, avatar: fixture_file_upload(Rails.root + 'spec/fixtures/dk.png', 'image/png')) }
let(:upload) { Upload.find_by(model: user, uploader: 'AvatarUploader') }
subject { described_class.new(:file, upload) }
describe '#execute' do
context 'user avatar' do
it 'sets an absolute path' do
expect(subject.file_type).to eq(:file)
expect(subject.file_id).to eq(upload.id)
expect(subject.filename).to eq(upload.absolute_path)
expect(Pathname.new(subject.filename).absolute?).to be_truthy
expect(subject.request_data).to eq({ id: upload.model_id,
type: 'User',
checksum: upload.checksum })
end
end
end
end
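The transfer spec implies what `Gitlab::Geo::FileTransfer#initialize` must populate; a sketch under that reading (attribute names come from the spec, the constructor body is an assumption):

```ruby
class FileTransfer < Transfer
  def initialize(file_type, upload)
    @file_type = file_type
    @file_id = upload.id
    @filename = upload.absolute_path # absolute on-disk path, per the spec
    @request_data = { id: upload.model_id,
                      type: upload.model_type, # e.g. 'User' for an avatar
                      checksum: upload.checksum }
  end
end
```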
require 'spec_helper'
describe Gitlab::Geo::LogCursor::Daemon, :postgresql, :clean_gitlab_redis_shared_state do
include ::EE::GeoHelpers
set(:primary) { create(:geo_node, :primary) }
set(:secondary) { create(:geo_node) }
let(:options) { {} }
subject(:daemon) { described_class.new(options) }
around do |example|
Sidekiq::Testing.fake! { example.run }
end
before do
stub_current_geo_node(secondary)
allow(daemon).to receive(:trap_signals)
allow(daemon).to receive(:arbitrary_sleep).and_return(0.1)
end
describe '#run!' do
it 'traps signals' do
is_expected.to receive(:exit?).and_return(true)
is_expected.to receive(:trap_signals)
daemon.run!
end
it 'delegates to #run_once! in a loop' do
is_expected.to receive(:exit?).and_return(false, false, false, true)
is_expected.to receive(:run_once!).twice
daemon.run!
end
it 'skips execution if it cannot obtain a lease' do
is_expected.to receive(:exit?).and_return(false, true)
is_expected.not_to receive(:run_once!)
expect_any_instance_of(Gitlab::ExclusiveLease).to receive(:try_obtain_with_ttl).and_return({ ttl: 1, uuid: false })
daemon.run!
end
it 'skips execution if not a Geo node' do
stub_current_geo_node(nil)
is_expected.to receive(:exit?).and_return(false, true)
is_expected.to receive(:sleep).with(1.minute)
is_expected.not_to receive(:run_once!)
daemon.run!
end
it 'skips execution if the current node is a primary' do
stub_current_geo_node(primary)
is_expected.to receive(:exit?).and_return(false, true)
is_expected.to receive(:sleep).with(1.minute)
is_expected.not_to receive(:run_once!)
daemon.run!
end
end
describe '#run_once!' do
context 'when replaying a repository created event' do
let(:project) { create(:project) }
let(:repository_created_event) { create(:geo_repository_created_event, project: project) }
let(:event_log) { create(:geo_event_log, repository_created_event: repository_created_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
it 'creates a new project registry' do
expect { daemon.run_once! }.to change(Geo::ProjectRegistry, :count).by(1)
end
it 'sets resync attributes to true' do
daemon.run_once!
registry = Geo::ProjectRegistry.last
expect(registry).to have_attributes(project_id: project.id, resync_repository: true, resync_wiki: true)
end
it 'sets resync_wiki to false if wiki_path is nil' do
repository_created_event.update!(wiki_path: nil)
daemon.run_once!
registry = Geo::ProjectRegistry.last
expect(registry).to have_attributes(project_id: project.id, resync_repository: true, resync_wiki: false)
end
it 'performs Geo::ProjectSyncWorker' do
expect(Geo::ProjectSyncWorker).to receive(:perform_async)
.with(project.id, anything).once
daemon.run_once!
end
end
context 'when replaying a repository updated event' do
let(:project) { create(:project) }
let(:repository_updated_event) { create(:geo_repository_updated_event, project: project) }
let(:event_log) { create(:geo_event_log, repository_updated_event: repository_updated_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
it 'creates a new project registry if it does not exist' do
expect { daemon.run_once! }.to change(Geo::ProjectRegistry, :count).by(1)
end
it 'sets resync_repository to true if event source is repository' do
repository_updated_event.update!(source: Geo::RepositoryUpdatedEvent::REPOSITORY)
registry = create(:geo_project_registry, :synced, project: repository_updated_event.project)
daemon.run_once!
expect(registry.reload.resync_repository).to be true
end
it 'sets resync_wiki to true if event source is wiki' do
repository_updated_event.update!(source: Geo::RepositoryUpdatedEvent::WIKI)
registry = create(:geo_project_registry, :synced, project: repository_updated_event.project)
daemon.run_once!
expect(registry.reload.resync_wiki).to be true
end
it 'performs Geo::ProjectSyncWorker' do
expect(Geo::ProjectSyncWorker).to receive(:perform_async)
.with(project.id, anything).once
daemon.run_once!
end
end
context 'when replaying a repository deleted event' do
let(:event_log) { create(:geo_event_log, :deleted_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
let(:repository_deleted_event) { event_log.repository_deleted_event }
let(:project) { repository_deleted_event.project }
it 'does not create a tracking database entry' do
expect { daemon.run_once! }.not_to change(Geo::ProjectRegistry, :count)
end
it 'schedules a GeoRepositoryDestroyWorker' do
project_id = repository_deleted_event.project_id
project_name = repository_deleted_event.deleted_project_name
project_path = repository_deleted_event.deleted_path
expect(::GeoRepositoryDestroyWorker).to receive(:perform_async)
.with(project_id, project_name, project_path, project.repository_storage)
daemon.run_once!
end
it 'removes the tracking database entry if one exists' do
create(:geo_project_registry, :synced, project: project)
expect { daemon.run_once! }.to change(Geo::ProjectRegistry, :count).by(-1)
end
end
context 'when replaying a repositories changed event' do
let(:repositories_changed_event) { create(:geo_repositories_changed_event, geo_node: secondary) }
let(:event_log) { create(:geo_event_log, repositories_changed_event: repositories_changed_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
it 'schedules a Geo::RepositoriesCleanUpWorker when event node is the current node' do
expect(Geo::RepositoriesCleanUpWorker).to receive(:perform_in).with(within(5.minutes).of(1.hour), secondary.id)
daemon.run_once!
end
it 'does not schedule a Geo::RepositoriesCleanUpWorker when event node is not the current node' do
stub_current_geo_node(build(:geo_node))
expect(Geo::RepositoriesCleanUpWorker).not_to receive(:perform_in)
daemon.run_once!
end
end
context 'when node has namespace restrictions' do
let(:group_1) { create(:group) }
let(:group_2) { create(:group) }
let(:project) { create(:project, group: group_1) }
let(:repository_updated_event) { create(:geo_repository_updated_event, project: project) }
let(:event_log) { create(:geo_event_log, repository_updated_event: repository_updated_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
before do
allow(Geo::ProjectSyncWorker).to receive(:perform_async)
end
it 'replays events for projects that belong to selected namespaces to replicate' do
secondary.update!(namespaces: [group_1])
expect { daemon.run_once! }.to change(Geo::ProjectRegistry, :count).by(1)
end
it 'does not replay events for projects that do not belong to selected namespaces to replicate' do
secondary.update!(selective_sync_type: 'namespaces', namespaces: [group_2])
expect { daemon.run_once! }.not_to change(Geo::ProjectRegistry, :count)
end
it 'does not replay events for projects that do not belong to selected shards to replicate' do
secondary.update!(selective_sync_type: 'shards', selective_sync_shards: ['broken'])
expect { daemon.run_once! }.not_to change(Geo::ProjectRegistry, :count)
end
end
context 'when processing a repository renamed event' do
let(:event_log) { create(:geo_event_log, :renamed_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
let(:repository_renamed_event) { event_log.repository_renamed_event }
it 'does not create a new project registry' do
expect { daemon.run_once! }.not_to change(Geo::ProjectRegistry, :count)
end
it 'schedules a Geo::RenameRepositoryWorker' do
project_id = repository_renamed_event.project_id
old_path_with_namespace = repository_renamed_event.old_path_with_namespace
new_path_with_namespace = repository_renamed_event.new_path_with_namespace
expect(::Geo::RenameRepositoryWorker).to receive(:perform_async)
.with(project_id, old_path_with_namespace, new_path_with_namespace)
daemon.run_once!
end
end
context 'when processing a hashed storage migration event' do
let(:event_log) { create(:geo_event_log, :hashed_storage_migration_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
let(:hashed_storage_migrated_event) { event_log.hashed_storage_migrated_event }
it 'does not create a new project registry' do
expect { daemon.run_once! }.not_to change(Geo::ProjectRegistry, :count)
end
it 'schedules a Geo::HashedStorageMigrationWorker' do
project = hashed_storage_migrated_event.project
old_disk_path = hashed_storage_migrated_event.old_disk_path
new_disk_path = hashed_storage_migrated_event.new_disk_path
old_storage_version = project.storage_version
expect(::Geo::HashedStorageMigrationWorker).to receive(:perform_async)
.with(project.id, old_disk_path, new_disk_path, old_storage_version)
daemon.run_once!
end
end
context 'when processing an attachment migration event to hashed storage' do
let(:event_log) { create(:geo_event_log, :hashed_storage_attachments_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
let(:hashed_storage_attachments_event) { event_log.hashed_storage_attachments_event }
it 'does not create a new project registry' do
expect { daemon.run_once! }.not_to change(Geo::ProjectRegistry, :count)
end
it 'schedules a Geo::HashedStorageAttachmentsMigrationWorker' do
project = hashed_storage_attachments_event.project
old_attachments_path = hashed_storage_attachments_event.old_attachments_path
new_attachments_path = hashed_storage_attachments_event.new_attachments_path
expect(::Geo::HashedStorageAttachmentsMigrationWorker).to receive(:perform_async)
.with(project.id, old_attachments_path, new_attachments_path)
daemon.run_once!
end
end
context 'when replaying a LFS object deleted event' do
let(:event_log) { create(:geo_event_log, :lfs_object_deleted_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
let(:lfs_object_deleted_event) { event_log.lfs_object_deleted_event }
let(:lfs_object) { lfs_object_deleted_event.lfs_object }
it 'does not create a tracking database entry' do
expect { daemon.run_once! }.not_to change(Geo::FileRegistry, :count)
end
it 'schedules a Geo::FileRemovalWorker' do
file_path = File.join(LfsObjectUploader.root, lfs_object_deleted_event.file_path)
expect(::Geo::FileRemovalWorker).to receive(:perform_async)
.with(file_path)
daemon.run_once!
end
it 'removes the tracking database entry if one exists' do
create(:geo_file_registry, :lfs, file_id: lfs_object.id)
expect { daemon.run_once! }.to change(Geo::FileRegistry.lfs_objects, :count).by(-1)
end
end
context 'when replaying a job artifact event' do
let(:event_log) { create(:geo_event_log, :job_artifact_deleted_event) }
let!(:event_log_state) { create(:geo_event_log_state, event_id: event_log.id - 1) }
let(:job_artifact_deleted_event) { event_log.job_artifact_deleted_event }
let(:job_artifact) { job_artifact_deleted_event.job_artifact }
context 'with a tracking database entry' do
before do
create(:geo_file_registry, :job_artifact, file_id: job_artifact.id)
end
context 'with a file' do
context 'when the delete succeeds' do
it 'removes the tracking database entry' do
expect { daemon.run_once! }.to change(Geo::FileRegistry.job_artifacts, :count).by(-1)
end
it 'deletes the file' do
expect { daemon.run_once! }.to change { File.exist?(job_artifact.file.path) }.from(true).to(false)
end
end
context 'when the delete fails' do
before do
expect(daemon).to receive(:delete_file).and_return(false)
end
it 'does not remove the tracking database entry' do
expect { daemon.run_once! }.not_to change(Geo::FileRegistry.job_artifacts, :count)
end
end
end
context 'without a file' do
before do
FileUtils.rm(job_artifact.file.path)
end
it 'removes the tracking database entry' do
expect { daemon.run_once! }.to change(Geo::FileRegistry.job_artifacts, :count).by(-1)
end
end
end
context 'without a tracking database entry' do
it 'does not create a tracking database entry' do
expect { daemon.run_once! }.not_to change(Geo::FileRegistry, :count)
end
it 'does not delete the file (yet, due to possible race condition)' do
expect { daemon.run_once! }.not_to change { File.exist?(job_artifact.file.path) }.from(true)
end
end
end
end
describe '#delete_file' do
context 'when the file exists' do
let!(:file) { fixture_file_upload(Rails.root + "spec/fixtures/dk.png", "image/png") }
context 'when the delete does not raise an exception' do
it 'returns true' do
expect(daemon.send(:delete_file, file.path)).to be_truthy
end
it 'does not log an error' do
expect(daemon).not_to receive(:logger)
daemon.send(:delete_file, file.path)
end
end
context 'when the delete raises an exception' do
before do
expect(File).to receive(:delete).and_raise('something went wrong')
end
it 'returns false' do
expect(daemon.send(:delete_file, file.path)).to be_falsey
end
it 'logs an error' do
logger = double('logger')
expect(daemon).to receive(:logger).and_return(logger)
expect(logger).to receive(:error).with('Failed to remove file', exception: 'RuntimeError', details: 'something went wrong', filename: file.path)
daemon.send(:delete_file, file.path)
end
end
end
context 'when the file does not exist' do
it 'returns false' do
expect(daemon.send(:delete_file, '/does/not/exist')).to be_falsey
end
it 'logs an error' do
logger = double('logger')
expect(daemon).to receive(:logger).and_return(logger)
expect(logger).to receive(:error).with('Failed to remove file', exception: 'Errno::ENOENT', details: 'No such file or directory @ unlink_internal - /does/not/exist', filename: '/does/not/exist')
daemon.send(:delete_file, '/does/not/exist')
end
end
end
end
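For reference, a `delete_file` implementation consistent with the expectations above (a sketch; the structured log fields are taken verbatim from the spec):

```ruby
def delete_file(path)
  File.delete(path)
  true
rescue StandardError => ex
  # Covers both the injected RuntimeError and Errno::ENOENT for missing files.
  logger.error('Failed to remove file',
               exception: ex.class.name,
               details: ex.message,
               filename: path)
  false
end
```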
require 'spec_helper'
describe Geo::FileDownloadService do
include ::EE::GeoHelpers
set(:primary) { create(:geo_node, :primary) }
set(:secondary) { create(:geo_node) }
before do
stub_current_geo_node(secondary)
allow_any_instance_of(Gitlab::ExclusiveLease).to receive(:try_obtain).and_return(true)
end
describe '#execute' do
context 'user avatar' do
let(:user) { create(:user, avatar: fixture_file_upload(Rails.root + 'spec/fixtures/dk.png', 'image/png')) }
let(:upload) { Upload.find_by(model: user, uploader: 'AvatarUploader') }
subject(:execute!) { described_class.new(:avatar, upload.id).execute }
it 'downloads a user avatar' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { execute! }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { execute! }.to change { Geo::FileRegistry.failed.count }.by(1)
expect(Geo::FileRegistry.last.retry_count).to eq(1)
expect(Geo::FileRegistry.last.retry_at).to be_present
end
it 'registers when the download fails with some other error' do
stub_transfer(Gitlab::Geo::FileTransfer, nil)
expect { execute! }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'group avatar' do
let(:group) { create(:group, avatar: fixture_file_upload(Rails.root + 'spec/fixtures/dk.png', 'image/png')) }
let(:upload) { Upload.find_by(model: group, uploader: 'AvatarUploader') }
subject(:execute!) { described_class.new(:avatar, upload.id).execute }
it 'downloads a group avatar' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { execute! }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { execute! }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'project avatar' do
let(:project) { create(:project, avatar: fixture_file_upload(Rails.root + 'spec/fixtures/dk.png', 'image/png')) }
let(:upload) { Upload.find_by(model: project, uploader: 'AvatarUploader') }
subject(:execute!) { described_class.new(:avatar, upload.id).execute }
it 'downloads a project avatar' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { execute! }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { execute! }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'with an attachment' do
let(:note) { create(:note, :with_attachment) }
let(:upload) { Upload.find_by(model: note, uploader: 'AttachmentUploader') }
subject(:execute!) { described_class.new(:attachment, upload.id).execute }
it 'downloads the attachment' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { execute! }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { execute! }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'with a snippet' do
let(:upload) { create(:upload, :personal_snippet_upload) }
subject(:execute!) { described_class.new(:personal_file, upload.id).execute }
it 'downloads the file' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { execute! }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { execute! }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'with file upload' do
let(:project) { create(:project) }
let(:upload) { Upload.find_by(model: project, uploader: 'FileUploader') }
subject { described_class.new(:file, upload.id) }
before do
FileUploader.new(project).store!(fixture_file_upload(Rails.root + 'spec/fixtures/dk.png', 'image/png'))
end
it 'downloads the file' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { subject.execute }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { subject.execute }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'with namespace file upload' do
let(:group) { create(:group) }
let(:upload) { Upload.find_by(model: group, uploader: 'NamespaceFileUploader') }
subject { described_class.new(:file, upload.id) }
before do
NamespaceFileUploader.new(group).store!(fixture_file_upload(Rails.root + 'spec/fixtures/dk.png', 'image/png'))
end
it 'downloads the file' do
stub_transfer(Gitlab::Geo::FileTransfer, 100)
expect { subject.execute }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::FileTransfer, -1)
expect { subject.execute }.to change { Geo::FileRegistry.failed.count }.by(1)
end
end
context 'LFS object' do
let(:lfs_object) { create(:lfs_object) }
subject { described_class.new(:lfs, lfs_object.id) }
it 'downloads an LFS object' do
stub_transfer(Gitlab::Geo::LfsTransfer, 100)
expect { subject.execute }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::LfsTransfer, -1)
expect { subject.execute }.to change { Geo::FileRegistry.failed.count }.by(1)
end
it 'logs a message' do
stub_transfer(Gitlab::Geo::LfsTransfer, 100)
expect(Gitlab::Geo::Logger).to receive(:info).with(hash_including(:message, :download_time_s, success: true, bytes_downloaded: 100)).and_call_original
subject.execute
end
end
context 'job artifacts' do
let(:job_artifact) { create(:ci_job_artifact) }
subject { described_class.new(:job_artifact, job_artifact.id) }
it 'downloads a job artifact' do
stub_transfer(Gitlab::Geo::JobArtifactTransfer, 100)
expect { subject.execute }.to change { Geo::FileRegistry.synced.count }.by(1)
end
it 'registers when the download fails' do
stub_transfer(Gitlab::Geo::JobArtifactTransfer, -1)
expect { subject.execute }.to change { Geo::FileRegistry.failed.count }.by(1)
end
it 'logs a message' do
stub_transfer(Gitlab::Geo::JobArtifactTransfer, 100)
expect(Gitlab::Geo::Logger).to receive(:info).with(hash_including(:message, :download_time_s, success: true, bytes_downloaded: 100)).and_call_original
subject.execute
end
end
context 'bad object type' do
it 'raises an error' do
expect { described_class.new(:bad, 1).execute }.to raise_error(NameError)
end
end
def stub_transfer(kls, result)
instance = double("(instance of #{kls})", download_from_primary: result)
allow(kls).to receive(:new).and_return(instance)
end
end
end
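Across all of these contexts the service records one `Geo::FileRegistry` row per attempt, succeeded or not. A bookkeeping sketch consistent with the stubbed transfer results (100 bytes → synced, -1 or nil → failed; `object_type`/`object_db_id` are assumed names):

```ruby
def execute
  bytes_downloaded = transfer.download_from_primary
  success = bytes_downloaded.present? && bytes_downloaded >= 0

  # The real service also bumps retry_count and sets retry_at on failure,
  # per the 'registers when the download fails' examples above.
  Geo::FileRegistry.create!(file_type: object_type,
                            file_id: object_db_id,
                            bytes: bytes_downloaded,
                            success: success)
end
```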
require 'spec_helper'
# Disable transactions via :delete method because a foreign table
# can't see changes inside a transaction of a different connection.
describe Geo::FilesExpireService, :geo, :delete do
let(:project) { create(:project) }
let!(:old_full_path) { project.full_path }
subject { described_class.new(project, old_full_path) }
describe '#execute' do
let(:file_uploader) { build(:file_uploader, project: project) }
let!(:upload) { Upload.find_by(path: file_uploader.upload_path) }
let!(:file_registry) { create(:geo_file_registry, file_id: upload.id) }
before do
project.update(path: "#{project.path}_renamed")
end
context 'when in Geo secondary node' do
before do
allow(Gitlab::Geo).to receive(:secondary?) { true }
end
it 'removes the file from disk' do
file_path = File.join(subject.base_dir, upload.path)
expect(File.exist?(file_path)).to be_truthy
Sidekiq::Testing.inline! { subject.execute }
expect(File.exist?(file_path)).to be_falsey
end
it 'removes the file_registry associated with the upload' do
expect(file_registry.success).to be_truthy
subject.execute
expect { file_registry.reload }.to raise_error(ActiveRecord::RecordNotFound)
end
end
context 'when not in Geo secondary node' do
it 'does not schedule file removal or resync' do
expect(subject).not_to receive(:schedule_file_removal)
expect(subject).not_to receive(:mark_for_resync!)
subject.execute
end
end
end
end
require 'spec_helper'
def base_path(storage)
File.join(FileUploader.root, storage.disk_path)
end
describe Geo::HashedStorageAttachmentsMigrationService do
let!(:project) { create(:project) }
let(:legacy_storage) { Storage::LegacyProject.new(project) }
let(:hashed_storage) { Storage::HashedProject.new(project) }
let!(:upload) { Upload.find_by(path: file_uploader.upload_path) }
let(:file_uploader) { build(:file_uploader, project: project) }
let(:old_path) { File.join(base_path(legacy_storage), upload.path) }
let(:new_path) { File.join(base_path(hashed_storage), upload.path) }
subject(:service) do
described_class.new(project.id,
old_attachments_path: legacy_storage.disk_path,
new_attachments_path: hashed_storage.disk_path)
end
describe '#execute' do
context 'when succeeds' do
it 'moves attachments to hashed storage layout' do
expect(File.file?(old_path)).to be_truthy
expect(File.file?(new_path)).to be_falsey
expect(File.exist?(base_path(legacy_storage))).to be_truthy
expect(File.exist?(base_path(hashed_storage))).to be_falsey
expect(FileUtils).to receive(:mv).with(base_path(legacy_storage), base_path(hashed_storage)).and_call_original
service.execute
expect(File.exist?(base_path(hashed_storage))).to be_truthy
expect(File.exist?(base_path(legacy_storage))).to be_falsey
expect(File.file?(old_path)).to be_falsey
expect(File.file?(new_path)).to be_truthy
end
end
context 'when original folder does not exist anymore' do
before do
FileUtils.rm_rf(base_path(legacy_storage))
end
it 'skips moving folders and continues' do
expect(FileUtils).not_to receive(:mv).with(base_path(legacy_storage), base_path(hashed_storage))
service.execute
expect(File.exist?(base_path(hashed_storage))).to be_falsey
expect(File.file?(new_path)).to be_falsey
end
end
context 'when target folder already exists' do
before do
FileUtils.mkdir_p(base_path(hashed_storage))
end
it 'raises AttachmentMigrationError' do
expect(FileUtils).not_to receive(:mv).with(base_path(legacy_storage), base_path(hashed_storage))
expect { service.execute }.to raise_error(::Geo::AttachmentMigrationError)
end
end
end
describe '#async_execute' do
it 'starts the worker' do
expect(Geo::HashedStorageAttachmentsMigrationWorker).to receive(:perform_async)
service.async_execute
end
it 'returns job id' do
allow(Geo::HashedStorageAttachmentsMigrationWorker).to receive(:perform_async).and_return('foo')
expect(service.async_execute).to eq('foo')
end
end
end
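The three `#execute` contexts above describe a guarded folder move: skip when the source is gone, refuse to clobber an existing target, otherwise `FileUtils.mv`. A sketch to that effect (path resolution under `FileUploader.root` is elided):

```ruby
def execute
  move_folder!(old_attachments_path, new_attachments_path)
end

def move_folder!(old_path, new_path)
  # Source already migrated or deleted: nothing to do, move on.
  return unless File.directory?(old_path)

  if File.exist?(new_path)
    raise ::Geo::AttachmentMigrationError,
          "Target path '#{new_path}' already exists"
  end

  FileUtils.mv(old_path, new_path)
end
```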
require 'spec_helper'
describe Geo::FileDownloadDispatchWorker, :geo do
include ::EE::GeoHelpers
let(:primary) { create(:geo_node, :primary, host: 'primary-geo-node') }
let(:secondary) { create(:geo_node) }
before do
stub_current_geo_node(secondary)
allow_any_instance_of(Gitlab::ExclusiveLease).to receive(:try_obtain).and_return(true)
allow_any_instance_of(Gitlab::ExclusiveLease).to receive(:renew).and_return(true)
allow_any_instance_of(described_class).to receive(:over_time?).and_return(false)
WebMock.stub_request(:get, /primary-geo-node/).to_return(status: 200, body: "", headers: {})
end
subject { described_class.new }
shared_examples '#perform' do |skip_tests|
before do
skip('FDW is not configured') if skip_tests
end
it 'does not schedule anything when tracking database is not configured' do
create(:lfs_object, :with_file)
allow(Gitlab::Geo).to receive(:geo_database_configured?) { false }
expect(Geo::FileDownloadWorker).not_to receive(:perform_async)
subject.perform
# We need to unstub here or the DatabaseCleaner will have issues since it
# will appear as though the tracking DB were not available
allow(Gitlab::Geo).to receive(:geo_database_configured?).and_call_original
end
it 'does not schedule anything when node is disabled' do
create(:lfs_object, :with_file)
secondary.enabled = false
secondary.save
expect(Geo::FileDownloadWorker).not_to receive(:perform_async)
subject.perform
end
context 'with LFS objects' do
let!(:lfs_object_local_store) { create(:lfs_object, :with_file) }
let!(:lfs_object_remote_store) { create(:lfs_object, :with_file) }
before do
stub_lfs_object_storage
lfs_object_remote_store.file.migrate!(LfsObjectUploader::Store::REMOTE)
end
it 'filters S3-backed files' do
expect(Geo::FileDownloadWorker).to receive(:perform_async).with(:lfs, lfs_object_local_store.id)
expect(Geo::FileDownloadWorker).not_to receive(:perform_async).with(:lfs, lfs_object_remote_store.id)
subject.perform
end
end
context 'with job artifacts' do
it 'performs Geo::FileDownloadWorker for unsynced job artifacts' do
artifact = create(:ci_job_artifact)
expect(Geo::FileDownloadWorker).to receive(:perform_async)
.with(:job_artifact, artifact.id).once.and_return(spy)
subject.perform
end
it 'performs Geo::FileDownloadWorker for failed-sync job artifacts' do
artifact = create(:ci_job_artifact)
Geo::FileRegistry.create!(file_type: :job_artifact, file_id: artifact.id, bytes: 0, success: false)
expect(Geo::FileDownloadWorker).to receive(:perform_async)
.with('job_artifact', artifact.id).once.and_return(spy)
subject.perform
end
it 'does not perform Geo::FileDownloadWorker for synced job artifacts' do
artifact = create(:ci_job_artifact)
Geo::FileRegistry.create!(file_type: :job_artifact, file_id: artifact.id, bytes: 1234, success: true)
expect(Geo::FileDownloadWorker).not_to receive(:perform_async)
subject.perform
end
it 'does not perform Geo::FileDownloadWorker for synced job artifacts even with 0 bytes downloaded' do
artifact = create(:ci_job_artifact)
Geo::FileRegistry.create!(file_type: :job_artifact, file_id: artifact.id, bytes: 0, success: true)
expect(Geo::FileDownloadWorker).not_to receive(:perform_async)
subject.perform
end
end
# Test the case where we have:
#
# 1. A total of 10 files in the queue, and we can load a maximum of 5 and send 2 at a time.
# 2. We send 2, wait for 1 to finish, and then send again.
it 'attempts to load a new batch without pending downloads' do
stub_const('Geo::BaseSchedulerWorker::DB_RETRIEVE_BATCH_SIZE', 5)
secondary.update!(files_max_capacity: 2)
allow_any_instance_of(::Gitlab::Geo::Transfer).to receive(:download_from_primary).and_return(100)
avatar = fixture_file_upload(Rails.root.join('spec/fixtures/dk.png'))
create_list(:lfs_object, 2, :with_file)
create_list(:user, 2, avatar: avatar)
create_list(:note, 2, :with_attachment)
create_list(:upload, 1, :personal_snippet_upload)
create_list(:ci_job_artifact, 1)
create(:appearance, logo: avatar, header_logo: avatar)
expect(Geo::FileDownloadWorker).to receive(:perform_async).exactly(10).times.and_call_original
# For 10 downloads, we expect four database reloads:
# 1. Load the first batch of 5.
# 2. 4 get sent out, 1 remains. This triggers another reload, which loads in the next 5.
# 3. Those 4 get sent out, and 1 remains.
# 4. Since the second reload filled the pipe with 4, we need to do a final reload to ensure
#    zero are left.
expect(subject).to receive(:load_pending_resources).exactly(4).times.and_call_original
Sidekiq::Testing.inline! do
subject.perform
end
end
context 'with a failed file' do
let(:failed_registry) { create(:geo_file_registry, :lfs, file_id: 999, success: false) }
it 'does not stall backfill' do
unsynced = create(:lfs_object, :with_file)
stub_const('Geo::BaseSchedulerWorker::DB_RETRIEVE_BATCH_SIZE', 1)
expect(Geo::FileDownloadWorker).not_to receive(:perform_async).with(:lfs, failed_registry.file_id)
expect(Geo::FileDownloadWorker).to receive(:perform_async).with(:lfs, unsynced.id)
subject.perform
end
it 'retries failed files' do
expect(Geo::FileDownloadWorker).to receive(:perform_async).with('lfs', failed_registry.file_id)
subject.perform
end
it 'does not retry failed files when retry_at is tomorrow' do
failed_registry = create(:geo_file_registry, :lfs, file_id: 999, success: false, retry_at: Date.tomorrow)
expect(Geo::FileDownloadWorker).not_to receive(:perform_async).with('lfs', failed_registry.file_id)
subject.perform
end
it 'retries failed files when retry_at is in the past' do
failed_registry = create(:geo_file_registry, :lfs, file_id: 999, success: false, retry_at: Date.yesterday)
expect(Geo::FileDownloadWorker).to receive(:perform_async).with('lfs', failed_registry.file_id)
subject.perform
end
end
context 'when node has namespace restrictions' do
let(:synced_group) { create(:group) }
let(:project_in_synced_group) { create(:project, group: synced_group) }
let(:unsynced_project) { create(:project) }
before do
allow(ProjectCacheWorker).to receive(:perform_async).and_return(true)
secondary.update!(selective_sync_type: 'namespaces', namespaces: [synced_group])
end
it 'does not perform Geo::FileDownloadWorker for an LFS object that does not belong to selected namespaces to replicate' do
lfs_object_in_synced_group = create(:lfs_objects_project, project: project_in_synced_group)
create(:lfs_objects_project, project: unsynced_project)
expect(Geo::FileDownloadWorker).to receive(:perform_async)
.with(:lfs, lfs_object_in_synced_group.lfs_object_id).once.and_return(spy)
subject.perform
end
it 'does not perform Geo::FileDownloadWorker for a job artifact that does not belong to selected namespaces to replicate' do
create(:ci_job_artifact, project: unsynced_project)
job_artifact_in_synced_group = create(:ci_job_artifact, project: project_in_synced_group)
expect(Geo::FileDownloadWorker).to receive(:perform_async)
.with(:job_artifact, job_artifact_in_synced_group.id).once.and_return(spy)
subject.perform
end
it 'does not perform Geo::FileDownloadWorker for upload objects that do not belong to selected namespaces to replicate' do
avatar = fixture_file_upload(Rails.root.join('spec/fixtures/dk.png'))
avatar_in_synced_group = create(:upload, model: synced_group, path: avatar)
create(:upload, model: create(:group), path: avatar)
avatar_in_project_in_synced_group = create(:upload, model: project_in_synced_group, path: avatar)
create(:upload, model: unsynced_project, path: avatar)
expect(Geo::FileDownloadWorker).to receive(:perform_async)
.with('avatar', avatar_in_project_in_synced_group.id).once.and_return(spy)
expect(Geo::FileDownloadWorker).to receive(:perform_async)
.with('avatar', avatar_in_synced_group.id).once.and_return(spy)
subject.perform
end
end
end
# Disable transactions via :delete method because a foreign table
# can't see changes inside a transaction of a different connection.
describe 'when PostgreSQL FDW is available', :geo, :delete do
# Skip if FDW isn't activated on this database
it_behaves_like '#perform', Gitlab::Database.postgresql? && !Gitlab::Geo.fdw?
end
describe 'when PostgreSQL FDW is not enabled', :geo do
before do
allow(Gitlab::Geo).to receive(:fdw?).and_return(false)
end
it_behaves_like '#perform', false
end
describe '#take_batch' do
it 'returns a batch of jobs' do
a = [[2, :lfs], [3, :lfs]]
b = []
c = [[3, :job_artifact], [8, :job_artifact], [9, :job_artifact]]
expect(subject).to receive(:db_retrieve_batch_size).and_return(4)
expect(subject.send(:take_batch, a, b, c)).to eq([
[3, :job_artifact],
[2, :lfs],
[8, :job_artifact],
[3, :lfs]
])
end
end
describe '#interleave' do
# Notice ties are resolved by taking the "first" tied element
it 'interleaves 2 arrays' do
a = %w{1 2 3}
b = %w{A B C}
expect(subject.send(:interleave, a, b)).to eq(%w{1 A 2 B 3 C})
end
# Notice there are no ties in this call
it 'interleaves 2 arrays with a longer second array' do
a = %w{1 2}
b = %w{A B C}
expect(subject.send(:interleave, a, b)).to eq(%w{A 1 B 2 C})
end
it 'interleaves 2 arrays with a longer first array' do
a = %w{1 2 3}
b = %w{A B}
expect(subject.send(:interleave, a, b)).to eq(%w{1 A 2 B 3})
end
it 'interleaves 3 arrays' do
a = %w{1 2 3}
b = %w{A B C}
c = %w{i ii iii}
expect(subject.send(:interleave, a, b, c)).to eq(%w{1 A i 2 B ii 3 C iii})
end
it 'interleaves 3 arrays of unequal length' do
a = %w{1 2}
b = %w{A}
c = %w{i ii iii iiii}
expect(subject.send(:interleave, a, b, c)).to eq(%w{i 1 ii A iii 2 iiii})
end
end
end
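The `#interleave` and `#take_batch` examples above fully determine the merge order: element `i` of an `n`-element array gets rank `(i + 1) / (n + 1)`, the arrays are merged by ascending rank with ties falling back to argument order, and `take_batch` then truncates to the batch size. One implementation consistent with every example (a reconstruction, not necessarily the shipped code):

```ruby
def interleave(*arrays)
  ranked = arrays.flat_map do |array|
    array.each_with_index.map { |item, i| [(i + 1).fdiv(array.size + 1), item] }
  end

  # sort_by is not stable, so carry the flat_map position as a tie-breaker;
  # this preserves argument order among equal ranks, matching the '1 A i' case.
  ranked.each_with_index
        .sort_by { |(rank, _item), position| [rank, position] }
        .map { |(_rank, item), _position| item }
end

def take_batch(*arrays)
  interleave(*arrays).take(db_retrieve_batch_size)
end
```

Checking one example: with `a = %w{1 2}` and `b = %w{A B C}`, the ranks are 1→1/3, 2→2/3 and A→1/4, B→2/4, C→3/4, which sorts to `A 1 B 2 C` as the spec expects.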
FactoryBot.define do
factory :geo_event_log, class: Geo::EventLog do
trait :created_event do
repository_created_event factory: :geo_repository_created_event
end
trait :updated_event do
repository_updated_event factory: :geo_repository_updated_event
end
trait :deleted_event do
repository_deleted_event factory: :geo_repository_deleted_event
end
trait :renamed_event do
repository_renamed_event factory: :geo_repository_renamed_event
end
trait :hashed_storage_migration_event do
hashed_storage_migrated_event factory: :geo_hashed_storage_migrated_event
end
trait :hashed_storage_attachments_event do
hashed_storage_attachments_event factory: :geo_hashed_storage_attachments_event
end
trait :lfs_object_deleted_event do
lfs_object_deleted_event factory: :geo_lfs_object_deleted_event
end
trait :job_artifact_deleted_event do
job_artifact_deleted_event factory: :geo_job_artifact_deleted_event
end
end
factory :geo_repository_created_event, class: Geo::RepositoryCreatedEvent do
project
repository_storage_name { project.repository_storage }
repository_storage_path { project.repository_storage_path }
add_attribute(:repo_path) { project.disk_path }
project_name { project.name }
wiki_path { project.wiki.disk_path }
end
factory :geo_repository_updated_event, class: Geo::RepositoryUpdatedEvent do
project
source 0
branches_affected 0
tags_affected 0
end
factory :geo_repository_deleted_event, class: Geo::RepositoryDeletedEvent do
project
repository_storage_name { project.repository_storage }
repository_storage_path { project.repository_storage_path }
deleted_path { project.path_with_namespace }
deleted_project_name { project.name }
end
factory :geo_repositories_changed_event, class: Geo::RepositoriesChangedEvent do
geo_node
end
factory :geo_repository_renamed_event, class: Geo::RepositoryRenamedEvent do
project { create(:project, :repository) }
repository_storage_name { project.repository_storage }
repository_storage_path { project.repository_storage_path }
old_path_with_namespace { project.path_with_namespace }
new_path_with_namespace { project.path_with_namespace + '_new' }
old_wiki_path_with_namespace { project.wiki.path_with_namespace }
new_wiki_path_with_namespace { project.wiki.path_with_namespace + '_new' }
old_path { project.path }
new_path { project.path + '_new' }
end
factory :geo_hashed_storage_migrated_event, class: Geo::HashedStorageMigratedEvent do
project { create(:project, :repository) }
repository_storage_name { project.repository_storage }
repository_storage_path { project.repository_storage_path }
old_disk_path { project.path_with_namespace }
new_disk_path { project.path_with_namespace + '_new' }
old_wiki_disk_path { project.wiki.path_with_namespace }
new_wiki_disk_path { project.wiki.path_with_namespace + '_new' }
new_storage_version { Project::HASHED_STORAGE_FEATURES[:repository] }
end
factory :geo_hashed_storage_attachments_event, class: Geo::HashedStorageAttachmentsEvent do
project { create(:project, :repository) }
old_attachments_path { Storage::LegacyProject.new(project).disk_path }
new_attachments_path { Storage::HashedProject.new(project).disk_path }
end
factory :geo_lfs_object_deleted_event, class: Geo::LfsObjectDeletedEvent do
lfs_object { create(:lfs_object, :with_file) }
after(:build, :stub) do |event, _|
local_store_path = Pathname.new(LfsObjectUploader.root)
relative_path = Pathname.new(event.lfs_object.file.path).relative_path_from(local_store_path)
event.oid = event.lfs_object.oid
event.file_path = relative_path
end
end
factory :geo_job_artifact_deleted_event, class: Geo::JobArtifactDeletedEvent do
job_artifact { create(:ci_job_artifact, :archive) }
after(:build, :stub) do |event, _|
local_store_path = Pathname.new(JobArtifactUploader.root)
relative_path = Pathname.new(event.job_artifact.file.path).relative_path_from(local_store_path)
event.file_path = relative_path
end
end
end
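Usage note on these factories: the `after(:build, :stub)` hooks derive event payloads from the associated records, so a spec only names a trait, e.g.:

```ruby
event = build(:geo_lfs_object_deleted_event)
event.file_path # path relative to LfsObjectUploader.root, set by the hook
```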
@@ -3,7 +3,6 @@ FactoryBot.define do
model { build(:project) }
size 100.kilobytes
uploader "AvatarUploader"
-  store ObjectStorage::Store::LOCAL
mount_point :avatar
secret nil
store ObjectStorage::Store::LOCAL
@@ -18,7 +18,7 @@ describe Ci::JobArtifact do
describe 'callbacks' do
subject { create(:ci_job_artifact, :archive) }
 
-  describe '#schedule_migration_to_object_storage' do
+  describe '#schedule_background_upload' do
context 'when object storage is disabled' do
before do
stub_artifacts_object_storage(enabled: false)
shared_examples "matches the method pattern" do |method|
let(:target) { subject }
let(:args) { nil }
let(:pattern) { patterns[method] }
it do
return skip "No pattern provided, skipping." unless pattern
expect(target.method(method).call(*args)).to match(pattern)
end
end
shared_examples "builds correct paths" do |**patterns|
let(:patterns) { patterns }
before do
allow(subject).to receive(:filename).and_return('<filename>')
end
describe "#store_dir" do
it_behaves_like "matches the method pattern", :store_dir
end
describe "#cache_dir" do
it_behaves_like "matches the method pattern", :cache_dir
end
describe "#work_dir" do
it_behaves_like "matches the method pattern", :work_dir
end
describe "#upload_path" do
it_behaves_like "matches the method pattern", :upload_path
end
describe ".absolute_path" do
it_behaves_like "matches the method pattern", :absolute_path do
let(:target) { subject.class }
let(:args) { [upload] }
end
end
describe ".base_dir" do
it_behaves_like "matches the method pattern", :base_dir do
let(:target) { subject.class }
end
end
end
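A hypothetical consumer of these shared examples, to show how the pattern hash flows through (the regexes are illustrative placeholders, not the real path layout):

```ruby
describe JobArtifactUploader do
  let(:job_artifact) { create(:ci_job_artifact, :archive) }

  subject { described_class.new(job_artifact, :file) }

  it_behaves_like "builds correct paths",
                  store_dir: %r[\h{2}/\h{2}/\h+/\d+/\d+\z],
                  cache_dir: %r[/tmp/cache\z],
                  work_dir: %r[/tmp/work\z]
end
```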
@@ -20,7 +20,7 @@ describe ObjectStorageUploadWorker do
stub_lfs_object_storage(background_upload: true)
end
 
it 'uploads object to storage' do
expect { perform }.to change { lfs_object.reload.file_store }.from(local).to(remote)
end
 