Commit 7f3bff15 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 8d0aed5e
Showing changed files with 291 additions and 34 deletions
1.71.0
1.72.0
@@ -172,11 +172,7 @@ class Clusters::ClustersController < Clusters::BaseController
private
 
def destroy_params
# To be uncommented on https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
# This MR got split into others since it was too big.
#
# params.permit(:cleanup)
{}
params.permit(:cleanup)
end
 
def update_params
@@ -63,7 +63,11 @@ module NotesActions
json.merge!(note_json(@note))
end
 
render json: json
if @note.errors.present? && @note.errors.keys != [:commands_only]
render json: json, status: :unprocessable_entity
else
render json: json
end
end
format.html { redirect_back_or_default }
end
@@ -2,6 +2,7 @@
 
class Projects::ErrorTrackingController < Projects::ApplicationController
before_action :authorize_read_sentry_issue!
before_action :set_issue_id, only: [:details, :stack_trace]
 
POLLING_INTERVAL = 10_000
 
@@ -113,6 +114,10 @@ class Projects::ErrorTrackingController < Projects::ApplicationController
params.permit(:issue_id)
end
 
def set_issue_id
@issue_id = issue_details_params[:issue_id]
end

def set_polling_interval
Gitlab::PollingInterval.set_header(response, interval: POLLING_INTERVAL)
end
@@ -14,12 +14,12 @@ module Projects::ErrorTrackingHelper
}
end
 
def error_details_data(project, issue)
opts = [project, issue, { format: :json }]
def error_details_data(project, issue_id)
opts = [project, issue_id, { format: :json }]
 
{
'issue-details-path' => details_namespace_project_error_tracking_index_path(*opts),
'issue-stack-trace-path' => stack_trace_namespace_project_error_tracking_index_path(*opts)
'issue-details-path' => details_project_error_tracking_index_path(*opts),
'issue-stack-trace-path' => stack_trace_project_error_tracking_index_path(*opts)
}
end
end
@@ -23,6 +23,7 @@ module Clusters
}.freeze
DEFAULT_ENVIRONMENT = '*'
KUBE_INGRESS_BASE_DOMAIN = 'KUBE_INGRESS_BASE_DOMAIN'
APPLICATIONS_ASSOCIATIONS = APPLICATIONS.values.map(&:association_name).freeze
 
belongs_to :user
belongs_to :management_project, class_name: '::Project', optional: true
@@ -117,7 +118,7 @@ module Clusters
scope :aws_installed, -> { aws_provided.joins(:provider_aws).merge(Clusters::Providers::Aws.with_status(:created)) }
 
scope :managed, -> { where(managed: true) }
scope :with_persisted_applications, -> { eager_load(*APPLICATIONS_ASSOCIATIONS) }
scope :default_environment, -> { where(environment_scope: DEFAULT_ENVIRONMENT) }
 
scope :for_project_namespace, -> (namespace_id) { joins(:projects).where(projects: { namespace_id: namespace_id }) }
@@ -195,9 +196,13 @@ module Clusters
{ connection_status: retrieve_connection_status }
end
 
def persisted_applications
APPLICATIONS_ASSOCIATIONS.map(&method(:public_send)).compact
end

def applications
APPLICATIONS.values.map do |application_class|
public_send(application_class.association_name) || public_send("build_#{application_class.association_name}") # rubocop:disable GitlabSecurity/PublicSend
APPLICATIONS_ASSOCIATIONS.map do |association_name|
public_send(association_name) || public_send("build_#{association_name}") # rubocop:disable GitlabSecurity/PublicSend
end
end
 
@@ -101,3 +101,5 @@ class MergeRequestPollCachedWidgetEntity < IssuableEntity
merged_by: merge_event&.author)
end
end

MergeRequestPollCachedWidgetEntity.prepend_if_ee('EE::MergeRequestPollCachedWidgetEntity')
# frozen_string_literal: true
module Clusters
module Cleanup
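# Uninstalls the cluster's persisted applications during cleanup, rescheduling the worker until none remain.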
class AppService < Clusters::Cleanup::BaseService
def execute
persisted_applications = @cluster.persisted_applications
persisted_applications.each do |app|
next unless app.available?
next unless app.can_uninstall?
log_event(:uninstalling_app, application: app.class.application_name)
uninstall_app_async(app)
end
# Keep calling the worker until all dependencies are uninstalled
return schedule_next_execution(Clusters::Cleanup::AppWorker) if persisted_applications.any?
log_event(:schedule_remove_project_namespaces)
cluster.continue_cleanup!
end
private
def uninstall_app_async(application)
application.make_scheduled!
Clusters::Applications::UninstallWorker.perform_async(application.name, application.id)
end
end
end
end
# frozen_string_literal: true
module Clusters
module Cleanup
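# Shared plumbing for the cleanup services: structured logging and rescheduling of the follow-up worker.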
class BaseService
DEFAULT_EXECUTION_INTERVAL = 1.minute
def initialize(cluster, execution_count = 0)
@cluster = cluster
@execution_count = execution_count
end
private
attr_reader :cluster
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
def log_event(event, extra_data = {})
meta = {
service: self.class.name,
cluster_id: cluster.id,
execution_count: @execution_count,
event: event
}
logger.info(meta.merge(extra_data))
end
def schedule_next_execution(worker_class)
log_event(:scheduling_execution, next_execution: @execution_count + 1)
worker_class.perform_in(execution_interval, cluster.id, @execution_count + 1)
end
# Override this method to customize the execution interval
def execution_interval
DEFAULT_EXECUTION_INTERVAL
end
end
end
end
# frozen_string_literal: true
module Clusters
module Cleanup
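# Deletes the Kubernetes namespaces GitLab created for the cluster's projects, rescheduling until all are gone.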
class ProjectNamespaceService < BaseService
KUBERNETES_NAMESPACE_BATCH_SIZE = 100
def execute
delete_project_namespaces_in_batches
# Keep calling the worker until all namespaces are deleted
if cluster.kubernetes_namespaces.exists?
return schedule_next_execution(Clusters::Cleanup::ProjectNamespaceWorker)
end
cluster.continue_cleanup!
end
private
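# Removes at most KUBERNETES_NAMESPACE_BATCH_SIZE namespaces per run; the rest are handled by the rescheduled worker.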
def delete_project_namespaces_in_batches
kubernetes_namespaces_batch = cluster.kubernetes_namespaces.first(KUBERNETES_NAMESPACE_BATCH_SIZE)
kubernetes_namespaces_batch.each do |kubernetes_namespace|
log_event(:deleting_project_namespace, namespace: kubernetes_namespace.namespace)
begin
kubeclient_delete_namespace(kubernetes_namespace)
rescue Kubeclient::HttpError
next
end
kubernetes_namespace.destroy!
end
end
def kubeclient_delete_namespace(kubernetes_namespace)
cluster.kubeclient.delete_namespace(kubernetes_namespace.namespace)
rescue Kubeclient::ResourceNotFoundError
# no-op: nothing to delete
end
end
end
end
# frozen_string_literal: true
module Clusters
module Cleanup
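# Removes the GitLab service account from the cluster and then destroys the cluster record itself.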
class ServiceAccountService < BaseService
def execute
delete_gitlab_service_account
log_event(:destroying_cluster)
cluster.destroy!
end
private
def delete_gitlab_service_account
log_event(:deleting_gitlab_service_account)
cluster.kubeclient.delete_service_account(
::Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAME,
::Clusters::Kubernetes::GITLAB_SERVICE_ACCOUNT_NAMESPACE
)
rescue Kubeclient::ResourceNotFoundError
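# no-op: the service account has already been removed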
end
end
end
end
- if !Gitlab::CurrentSettings.eks_integration_enabled?
- documentation_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path("integration/amazon") }
- documentation_link_start = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: help_page_path('user/project/clusters/add_remove_clusters.md',
anchor: 'additional-requirements-for-self-managed-instances') }
= s_('Amazon authentication is not %{link_start}correctly configured%{link_end}. Ask your GitLab administrator if you want to use this service.').html_safe % { link_start: documentation_link_start, link_end: '</a>'.html_safe }
- else
.js-create-eks-cluster-form-container{ data: { 'gitlab-managed-cluster-help-path' => help_page_path('user/project/clusters/index.md', anchor: 'gitlab-managed-clusters'),
@@ -16,5 +17,7 @@
'account-id' => Gitlab::CurrentSettings.eks_account_id,
'external-id' => @aws_role.role_external_id,
'kubernetes-integration-help-path' => help_page_path('user/project/clusters/index'),
'account-and-external-ids-help-path' => help_page_path('user/project/clusters/add_remove_clusters.md', anchor: 'eks-cluster'),
'create-role-arn-help-path' => help_page_path('user/project/clusters/add_remove_clusters.md', anchor: 'eks-cluster'),
'external-link-icon' => icon('external-link'),
'has-credentials' => @aws_role.role_arn.present?.to_s } }
- page_title _('Error Details')
- add_to_breadcrumbs 'Errors', project_error_tracking_index_path(@project)
 
#js-error_details{ data: error_details_data(@current_user, @project) }
#js-error_details{ data: error_details_data(@project, @issue_id) }
@@ -38,6 +38,9 @@
- gcp_cluster:cluster_patch_app
- gcp_cluster:cluster_upgrade_app
- gcp_cluster:cluster_provision
- gcp_cluster:clusters_cleanup_app
- gcp_cluster:clusters_cleanup_project_namespace
- gcp_cluster:clusters_cleanup_service_account
- gcp_cluster:cluster_wait_for_app_installation
- gcp_cluster:wait_for_cluster_creation
- gcp_cluster:cluster_wait_for_ingress_ip_address
@@ -3,13 +3,16 @@
module Clusters
module Cleanup
class AppWorker
include ApplicationWorker
include ClusterQueue
include ClusterApplications
include ClusterCleanupMethods
 
# TODO: Merge with https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
# We're splitting the above MR in smaller chunks to facilitate reviews
def perform
def perform(cluster_id, execution_count = 0)
Clusters::Cluster.with_persisted_applications.find_by_id(cluster_id).try do |cluster|
break unless cluster.cleanup_uninstalling_applications?
break exceeded_execution_limit(cluster) if exceeded_execution_limit?(execution_count)
::Clusters::Cleanup::AppService.new(cluster, execution_count).execute
end
end
end
end
@@ -3,13 +3,16 @@
module Clusters
module Cleanup
class ProjectNamespaceWorker
include ApplicationWorker
include ClusterQueue
include ClusterApplications
include ClusterCleanupMethods
 
# TODO: Merge with https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
# We're splitting the above MR in smaller chunks to facilitate reviews
def perform
def perform(cluster_id, execution_count = 0)
Clusters::Cluster.find_by_id(cluster_id).try do |cluster|
break unless cluster.cleanup_removing_project_namespaces?
break exceeded_execution_limit(cluster) if exceeded_execution_limit?(execution_count)
Clusters::Cleanup::ProjectNamespaceService.new(cluster, execution_count).execute
end
end
end
end
@@ -3,13 +3,14 @@
module Clusters
module Cleanup
class ServiceAccountWorker
include ApplicationWorker
include ClusterQueue
include ClusterApplications
include ClusterCleanupMethods
 
# TODO: Merge with https://gitlab.com/gitlab-org/gitlab/merge_requests/16954
# We're splitting the above MR in smaller chunks to facilitate reviews
def perform
def perform(cluster_id)
Clusters::Cluster.find_by_id(cluster_id).try do |cluster|
break unless cluster.cleanup_removing_service_account?
Clusters::Cleanup::ServiceAccountService.new(cluster).execute
end
end
end
end
# frozen_string_literal: true
# Concern for setting Sidekiq settings for the various GitLab cluster cleanup workers.
module ClusterCleanupMethods
extend ActiveSupport::Concern
include ApplicationWorker
include ClusterQueue
DEFAULT_EXECUTION_LIMIT = 10
ExceededExecutionLimitError = Class.new(StandardError)
included do
worker_has_external_dependencies!
sidekiq_options retry: 3
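# Flag the cluster's cleanup as errored and log the failure once Sidekiq exhausts its retries.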
sidekiq_retries_exhausted do |msg, error|
cluster_id = msg['args'][0]
cluster = Clusters::Cluster.find_by_id(cluster_id)
cluster.make_cleanup_errored!("#{self.class.name} retried too many times") if cluster
logger = Gitlab::Kubernetes::Logger.build
logger.error({
exception: error,
cluster_id: cluster_id,
class_name: msg['class'],
event: :sidekiq_retries_exhausted,
message: msg['error_message']
})
end
end
private
# Override this method to customize the execution_limit
def execution_limit
DEFAULT_EXECUTION_LIMIT
end
def exceeded_execution_limit?(execution_count)
execution_count >= execution_limit
end
def logger
@logger ||= Gitlab::Kubernetes::Logger.build
end
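# Marks the cleanup as errored when the worker has been rescheduled more times than the limit allows.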
def exceeded_execution_limit(cluster)
log_exceeded_execution_limit_error(cluster)
cluster.make_cleanup_errored!("#{self.class.name} exceeded the execution limit")
end
def cluster_applications_and_status(cluster)
cluster.persisted_applications
.map { |application| "#{application.name}:#{application.status_name}" }
.join(",")
end
def log_exceeded_execution_limit_error(cluster)
logger.error({
exception: ExceededExecutionLimitError.name,
cluster_id: cluster.id,
class_name: self.class.name,
cleanup_status: cluster.cleanup_status_name,
applications: cluster_applications_and_status(cluster),
event: :failed_to_remove_cluster_and_resources,
message: "exceeded execution limit of #{execution_limit} tries"
})
end
end
---
title: Correct link to Merge trains documentation on MR widget
merge_request: 19726
author:
type: changed
---
title: Fix group managed accounts members cleanup
merge_request: 20157
author:
type: fixed