Commit 20758bc3 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent a98649b7
Showing with 289 additions and 116 deletions
@@ -22,8 +22,7 @@ import MonitorTimeSeriesChart from './charts/time_series.vue';
import MonitorSingleStatChart from './charts/single_stat.vue';
import GraphGroup from './graph_group.vue';
import EmptyState from './empty_state.vue';
import TrackEventDirective from '~/vue_shared/directives/track_event';
import { getTimeDiff, isValidDate, downloadCSVOptions, generateLinkToChartOptions } from '../utils';
import { getTimeDiff, isValidDate } from '../utils';
 
export default {
components: {
@@ -44,7 +43,6 @@ export default {
directives: {
GlModal: GlModalDirective,
GlTooltip: GlTooltipDirective,
TrackEvent: TrackEventDirective,
},
props: {
externalDashboardUrl: {
@@ -300,8 +298,6 @@ export default {
onDateTimePickerApply(timeWindowUrlParams) {
return redirectTo(mergeUrlParams(timeWindowUrlParams, window.location.href));
},
downloadCSVOptions,
generateLinkToChartOptions,
},
addMetric: {
title: s__('Metrics|Add metric'),
@@ -883,6 +883,15 @@ $ide-commit-header-height: 48px;
margin-right: $ide-tree-padding;
border-bottom: 1px solid $white-dark;
 
svg {
color: $gray-700;

&:focus,
&:hover {
color: $blue-600;
}
}

.ide-new-btn {
margin-left: auto;
}
@@ -899,6 +908,11 @@ $ide-commit-header-height: 48px;
.dropdown-menu-toggle {
svg {
vertical-align: middle;
color: $gray-700;

&:hover {
color: $gray-700;
}
}
 
&:hover {
@@ -21,16 +21,11 @@
margin-bottom: 2px;
}
 
.issue-labels {
.issue-labels,
.author-link {
display: inline-block;
}
 
.issuable-meta {
.author-link {
display: inline-block;
}
}
.icon-merge-request-unmerged {
height: 13px;
margin-bottom: 3px;
@@ -53,16 +48,6 @@
margin-right: 15px;
}
 
.issues_content {
.title {
height: 40px;
}
form {
margin: 0;
}
}
form.edit-issue {
margin: 0;
}
@@ -79,10 +64,6 @@ ul.related-merge-requests > li {
margin-left: 5px;
}
 
.row_title {
vertical-align: bottom;
}
gl-emoji {
font-size: 1em;
}
@@ -93,10 +74,6 @@ ul.related-merge-requests > li {
font-weight: $gl-font-weight-bold;
}
 
.merge-request-id {
display: inline-block;
}
.merge-request-status {
&.merged {
color: $blue-500;
@@ -118,11 +95,7 @@ ul.related-merge-requests > li {
border-color: $issues-today-border;
}
 
&.closed {
background: $gray-light;
border-color: $border-color;
}
&.closed,
&.merged {
background: $gray-light;
border-color: $border-color;
@@ -160,9 +133,12 @@ ul.related-merge-requests > li {
padding-bottom: 37px;
}
 
.issues-nav-controls {
.issues-nav-controls,
.new-branch-col {
font-size: 0;
}
 
.issues-nav-controls {
.btn-group:empty {
display: none;
}
@@ -198,8 +174,6 @@ ul.related-merge-requests > li {
}
 
.new-branch-col {
font-size: 0;
.discussion-filter-container {
&:not(:only-child) {
margin-right: $gl-padding-8;
@@ -297,11 +271,11 @@ ul.related-merge-requests > li {
padding-top: 0;
align-self: center;
}
}
 
.create-mr-dropdown-wrap {
.btn-group:not(.hidden) {
display: inline-flex;
}
.create-mr-dropdown-wrap {
.btn-group:not(.hidden) {
display: inline-flex;
}
}
}
@@ -114,8 +114,10 @@ module ProjectsHelper
source = visible_fork_source(project)
 
if source
_('This will remove the fork relationship between this project and %{fork_source}.') %
msg = _('This will remove the fork relationship between this project and %{fork_source}.') %
{ fork_source: link_to(source.full_name, project_path(source)) }
msg.html_safe
else
_('This will remove the fork relationship between this project and other projects in the fork network.')
end
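For context, a minimal sketch of the display bug this fixes (a hypothetical Rails console session; the fork source name is illustrative): `String#%` returns a plain `String`, so the `html_safe` link built by `link_to` gets escaped when the helper's result is rendered, and the explicit `html_safe` on the combined message is what lets the anchor tag through.

```ruby
link = link_to('gitlab-org/gitlab', '/gitlab-org/gitlab') # an html_safe <a> tag
msg = _('This will remove the fork relationship between this project and %{fork_source}.') %
  { fork_source: link }

msg.html_safe? # => false: String#% yields a plain String, so the view would escape the <a> tag
msg.html_safe  # mark the interpolated message as safe so the link renders as HTML
```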
@@ -4,6 +4,7 @@ class ActiveSession
include ActiveModel::Model
 
SESSION_BATCH_SIZE = 200
ALLOWED_NUMBER_OF_ACTIVE_SESSIONS = 100
 
attr_accessor :created_at, :updated_at,
:session_id, :ip_address,
@@ -65,21 +66,22 @@ class ActiveSession
 
def self.destroy(user, session_id)
Gitlab::Redis::SharedState.with do |redis|
redis.srem(lookup_key_name(user.id), session_id)
destroy_sessions(redis, user, [session_id])
end
end
 
deleted_keys = redis.del(key_name(user.id, session_id))
def self.destroy_sessions(redis, user, session_ids)
key_names = session_ids.map { |session_id| key_name(user.id, session_id) }
session_names = session_ids.map { |session_id| "#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}" }
 
# only allow deleting the devise session if we could actually find a
# related active session. this prevents another user from deleting
# someone else's session.
if deleted_keys > 0
redis.del("#{Gitlab::Redis::SharedState::SESSION_NAMESPACE}:#{session_id}")
end
end
redis.srem(lookup_key_name(user.id), session_ids)
redis.del(key_names)
redis.del(session_names)
end
 
def self.cleanup(user)
Gitlab::Redis::SharedState.with do |redis|
clean_up_old_sessions(redis, user)
cleaned_up_lookup_entries(redis, user)
end
end
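For reference, a minimal sketch of the Redis key scheme these helpers operate on, inferred from the specs further down; `key_name` and `lookup_key_name` are the class's existing helpers, and the literal key formats here are an assumption based on those specs.

```ruby
# Assumed key layout (matches the keys exercised in the specs below):
#
#   session:user:gitlab:<user_id>:<session_id>  - Marshal-dumped ActiveSession entry
#   session:lookup:user:gitlab:<user_id>        - set of the user's known session ids
#   session:gitlab:<session_id>                 - the underlying Devise session
def self.key_name(user_id, session_id)
  "session:user:gitlab:#{user_id}:#{session_id}"
end

def self.lookup_key_name(user_id)
  "session:lookup:user:gitlab:#{user_id}"
end
```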
@@ -118,19 +120,39 @@ class ActiveSession
end
end
 
def self.raw_active_session_entries(session_ids, user_id)
def self.raw_active_session_entries(redis, session_ids, user_id)
return [] if session_ids.empty?
 
Gitlab::Redis::SharedState.with do |redis|
entry_keys = session_ids.map { |session_id| key_name(user_id, session_id) }
entry_keys = session_ids.map { |session_id| key_name(user_id, session_id) }
redis.mget(entry_keys)
end
 
redis.mget(entry_keys)
end

def self.active_session_entries(session_ids, user_id, redis)
return [] if session_ids.empty?

entry_keys = raw_active_session_entries(redis, session_ids, user_id)
entry_keys.map do |raw_session|
Marshal.load(raw_session) # rubocop:disable Security/MarshalLoad
end
end
 
def self.clean_up_old_sessions(redis, user)
session_ids = session_ids_for_user(user.id)
return if session_ids.count <= ALLOWED_NUMBER_OF_ACTIVE_SESSIONS

# remove sessions if there are more than ALLOWED_NUMBER_OF_ACTIVE_SESSIONS.
sessions = active_session_entries(session_ids, user.id, redis)
sessions.sort_by! { |session| session.updated_at }.reverse!
sessions = sessions[ALLOWED_NUMBER_OF_ACTIVE_SESSIONS..-1].map { |session| session.session_id }
destroy_sessions(redis, user, sessions)
end
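A short worked example of the eviction order (a console sketch with illustrative values and the limit shrunk from 100 to 3): sorting by `updated_at` and reversing puts the most recently used sessions first, so the slice past the limit contains only the stalest sessions.

```ruby
limit    = 3
sessions = [4.days.ago, 1.day.ago, 9.days.ago, 2.days.ago] # updated_at timestamps

newest_first = sessions.sort.reverse # [1.day.ago, 2.days.ago, 4.days.ago, 9.days.ago]
newest_first[limit..-1]              # [9.days.ago] - only the stalest session is destroyed
```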
def self.cleaned_up_lookup_entries(redis, user)
session_ids = session_ids_for_user(user.id)
entries = raw_active_session_entries(session_ids, user.id)
entries = raw_active_session_entries(redis, session_ids, user.id)
 
# remove expired keys.
# only the single key entries are automatically expired by redis, the
---
title: Resolve Limit the number of stored sessions per user
merge_request: 19325
author:
type: added
---
title: 'Resolve Design view: Download single issue design image'
merge_request: 20703
author:
type: added
---
title: Removed unused methods in monitoring dashboard
merge_request: 20819
author:
type: other
---
title: Fix a display bug in the fork removal description message
merge_request: 20843
author:
type: fixed
@@ -219,13 +219,43 @@ Note that your exact needs may be more, depending on your workload. Your
workload is influenced by factors such as - but not limited to - how active your
users are, how much automation you use, mirroring, and repo/change size.
 
### 5,000 User Configuration

- **Supported Users (approximate):** 5,000
- **Test RPS Rates:** API: 100 RPS, Web: 10 RPS, Git: 10 RPS
- **Status:** Work-in-progress
- **Known Issues:** For the latest list of known performance issues head
[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).

NOTE: **Note:** This architecture is a work in progress. The
Quality team will be certifying this environment in late 2019 or early 2020. The specifications
may be adjusted prior to certification based on performance testing.

| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] | 8 vCPU, 30GB Memory | n1-standard-8 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Redis Persistent + Sentinel | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] | - | - | - |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| External load balancing node[^2] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |

NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best-effort, like-for-like configuration can be used.

### 10,000 User Configuration
 
- **Supported Users (approximate):** 10,000
- **Test RPS Rates:** API: 200 RPS, Web: 20 RPS, Git: 20 RPS
- **Known Issues:** While validating the reference architectures, slow API
endpoints were discovered. For details, see the related issues list in
[this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125).
- **Known Issues:** For the latest list of known performance issues head
[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
 
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
@@ -250,9 +280,8 @@ vendors a best effort like for like can be used.
 
- **Supported Users (approximate):** 25,000
- **Test RPS Rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS
- **Known Issues:** While validating the reference architectures, slow API
endpoints were discovered. For details, see the related issues list in
[this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125).
- **Known Issues:** For the latest list of known performance issues head
[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
 
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
@@ -277,9 +306,8 @@ vendors a best effort like for like can be used.
 
- **Supported Users (approximate):** 50,000
- **Test RPS Rates:** API: 1000 RPS, Web: 100 RPS, Git: 100 RPS
- **Known Issues:** While validating the reference architectures, slow API
endpoints were discovered. For details, see the related issues list in
[this issue](https://gitlab.com/gitlab-org/quality/performance/issues/125).
- **Known Issues:** For the latest list of known performance issues head
[here](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=Quality%3Aperformance-issues).
 
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
@@ -300,15 +328,16 @@ endpoints were discovered. For details, see the related issues list in
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best-effort, like-for-like configuration can be used.
 
[^1]: Gitaly node requirements are dependent on customer data. We recommend 2
nodes as an absolute minimum for performance at the 10,000 and 25,000 user
scale and 4 nodes as an absolute minimum at the 50,000 user scale, but
additional nodes should be considered in conjunction with a review of
project counts and sizes.
[^1]: Gitaly node requirements are dependent on customer data, specifically the number of
projects and their sizes. We recommend 2 nodes as an absolute minimum for HA environments,
and at least 4 nodes when supporting 50,000 or more users.
We recommend that each Gitaly node should store no more than 5TB of data.
Additional nodes should be considered in conjunction with a review of expected
data size and spread based on the recommendations above.
 
[^2]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
as the load balancer. However other reputable load balancers with similar feature sets
should also work here but be aware these aren't validated.
should also work instead but be aware these aren't validated.
 
[^3]: For data objects such as LFS, Uploads, Artifacts, etc., we recommend S3 Object Storage
where possible over NFS due to better performance and availability. Several types of objects
@@ -18,6 +18,9 @@ review the sessions, and revoke any you don't recognize.
 
![Active sessions list](img/active_sessions_list.png)
 
CAUTION: **Caution:**
It is possible to have a maximum of 100 active sessions at once. If the number of active sessions exceeds 100, the oldest ones are deleted.

<!-- ## Troubleshooting
 
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
@@ -4,6 +4,7 @@ require_relative '../gitlab/popen' unless defined?(Gitlab::Popen)
 
module Quality
class KubernetesClient
RESOURCE_LIST = 'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd'
CommandFailedError = Class.new(StandardError)
 
attr_reader :namespace
@@ -13,6 +14,13 @@ module Quality
end
 
def cleanup(release_name:, wait: true)
delete_by_selector(release_name: release_name, wait: wait)
delete_by_matching_name(release_name: release_name)
end

private

def delete_by_selector(release_name:, wait:)
selector = case release_name
when String
%(-l release="#{release_name}")
@@ -23,9 +31,9 @@ module Quality
end
 
command = [
%(--namespace "#{namespace}"),
'delete',
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa',
RESOURCE_LIST,
%(--namespace "#{namespace}"),
'--now',
'--ignore-not-found',
'--include-uninitialized',
@@ -36,7 +44,29 @@ module Quality
run_command(command)
end
 
private
def delete_by_matching_name(release_name:)
resource_names = raw_resource_names
command = [
'delete',
%(--namespace "#{namespace}")
]

Array(release_name).each do |release|
resource_names
.select { |resource_name| resource_name.include?(release) }
.each { |matching_resource| run_command(command + [matching_resource]) }
end
end

def raw_resource_names
command = [
'get',
RESOURCE_LIST,
%(--namespace "#{namespace}"),
'-o custom-columns=NAME:.metadata.name'
]
run_command(command).lines.map(&:strip)
end
 
def run_command(command)
final_command = ['kubectl', *command].join(' ')
@@ -92,7 +92,7 @@ namespace :gitlab do
lookup_key_count = redis.scard(key)
 
session_ids = ActiveSession.session_ids_for_user(user_id)
entries = ActiveSession.raw_active_session_entries(session_ids, user_id)
entries = ActiveSession.raw_active_session_entries(redis, session_ids, user_id)
session_ids_and_entries = session_ids.zip(entries)
 
inactive_session_ids = session_ids_and_entries.map do |session_id, session|
@@ -48,11 +48,31 @@ function delete_release() {
return
fi
 
echoinfo "Deleting release '${release}'..." true
helm_delete_release "${namespace}" "${release}"
kubectl_cleanup_release "${namespace}" "${release}"
}

function helm_delete_release() {
local namespace="${1}"
local release="${2}"
echoinfo "Deleting Helm release '${release}'..." true
 
helm delete --tiller-namespace "${namespace}" --purge "${release}"
}
 
function kubectl_cleanup_release() {
local namespace="${1}"
local release="${2}"
echoinfo "Deleting all K8s resources matching '${release}'..." true
kubectl --namespace "${namespace}" get ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd 2>&1 \
| grep "${release}" \
| awk '{print $1}' \
| xargs kubectl --namespace "${namespace}" delete \
|| true
}

function delete_failed_release() {
local namespace="${KUBE_NAMESPACE}"
local release="${CI_ENVIRONMENT_SLUG}"
@@ -71,7 +71,7 @@ module Trigger
 
# Can be overridden
def version_param_value(version_file)
File.read(version_file).strip
ENV[version_file]&.strip || File.read(version_file).strip
end
 
def variables
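A hedged usage sketch of the override above (the file name is illustrative; version files such as `GITALY_SERVER_VERSION` live at the repository root): an environment variable named after the version file now takes precedence over the file's contents.

```ruby
# Assuming a GITALY_SERVER_VERSION file containing "1.77.0\n":
version_param_value('GITALY_SERVER_VERSION') # => "1.77.0" (read from the file)

ENV['GITALY_SERVER_VERSION'] = 'my-test-branch'
version_param_value('GITALY_SERVER_VERSION') # => "my-test-branch" (the ENV value wins)
```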
@@ -5,7 +5,7 @@ require 'spec_helper'
describe Gitlab::HealthChecks::Probes::Collection do
let(:readiness) { described_class.new(*checks) }
 
describe '#call' do
describe '#execute' do
subject { readiness.execute }
 
context 'with all checks' do
@@ -5,15 +5,27 @@ require 'fast_spec_helper'
RSpec.describe Quality::KubernetesClient do
let(:namespace) { 'review-apps-ee' }
let(:release_name) { 'my-release' }
let(:pod_for_release) { "pod-my-release-abcd" }
let(:raw_resource_names_str) { "NAME\nfoo\n#{pod_for_release}\nbar" }
let(:raw_resource_names) { raw_resource_names_str.lines.map(&:strip) }
 
subject { described_class.new(namespace: namespace) }
 
describe 'RESOURCE_LIST' do
it 'returns the correct list of resources separated by commas' do
expect(described_class::RESOURCE_LIST).to eq('ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa,crd')
end
end

describe '#cleanup' do
before do
allow(subject).to receive(:raw_resource_names).and_return(raw_resource_names)
end

it 'raises an error if the Kubernetes command fails' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl --namespace "#{namespace}" delete ) \
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
"--now --ignore-not-found --include-uninitialized --wait=true -l release=\"#{release_name}\""])
.with(["kubectl delete #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l release="#{release_name}")])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
 
expect { subject.cleanup(release_name: release_name) }.to raise_error(described_class::CommandFailedError)
@@ -21,9 +33,12 @@ RSpec.describe Quality::KubernetesClient do
 
it 'calls kubectl with the correct arguments' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl --namespace "#{namespace}" delete ) \
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
"--now --ignore-not-found --include-uninitialized --wait=true -l release=\"#{release_name}\""])
.with(["kubectl delete #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l release="#{release_name}")])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
 
# We're not verifying the output here, just silencing it
@@ -35,20 +50,22 @@ RSpec.describe Quality::KubernetesClient do
 
it 'raises an error if the Kubernetes command fails' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl --namespace "#{namespace}" delete ) \
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
"--now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})'"])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
.with(["kubectl delete #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})')])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
 
expect { subject.cleanup(release_name: release_name) }.to raise_error(described_class::CommandFailedError)
end
 
it 'calls kubectl with the correct arguments' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl --namespace "#{namespace}" delete ) \
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
"--now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})'"])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
.with(["kubectl delete #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=true -l 'release in (#{release_name.join(', ')})')])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
 
# We're not verifying the output here, just silencing it
expect { subject.cleanup(release_name: release_name) }.to output.to_stdout
@@ -58,24 +75,37 @@ RSpec.describe Quality::KubernetesClient do
context 'with `wait: false`' do
it 'raises an error if the Kubernetes command fails' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl --namespace "#{namespace}" delete ) \
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
"--now --ignore-not-found --include-uninitialized --wait=false -l release=\"#{release_name}\""])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
.with(["kubectl delete #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=false -l release="#{release_name}")])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: false)))
 
expect { subject.cleanup(release_name: release_name, wait: false) }.to raise_error(described_class::CommandFailedError)
end
 
it 'calls kubectl with the correct arguments' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl --namespace "#{namespace}" delete ) \
'ingress,svc,pdb,hpa,deploy,statefulset,job,pod,secret,configmap,pvc,secret,clusterrole,clusterrolebinding,role,rolebinding,sa ' \
"--now --ignore-not-found --include-uninitialized --wait=false -l release=\"#{release_name}\""])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
.with(["kubectl delete #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" --now --ignore-not-found --include-uninitialized --wait=false -l release="#{release_name}")])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with([%(kubectl delete --namespace "#{namespace}" #{pod_for_release})])
.and_return(Gitlab::Popen::Result.new([], '', '', double(success?: true)))
 
# We're not verifying the output here, just silencing it
expect { subject.cleanup(release_name: release_name, wait: false) }.to output.to_stdout
end
end
end

describe '#raw_resource_names' do
it 'calls kubectl to retrieve the resource names' do
expect(Gitlab::Popen).to receive(:popen_with_detail)
.with(["kubectl get #{described_class::RESOURCE_LIST} " +
%(--namespace "#{namespace}" -o custom-columns=NAME:.metadata.name)])
.and_return(Gitlab::Popen::Result.new([], raw_resource_names_str, '', double(success?: true)))
expect(subject.__send__(:raw_resource_names)).to eq(raw_resource_names)
end
end
end
@@ -242,23 +242,13 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
expect(redis.scan_each(match: "session:gitlab:*").to_a).to be_empty
end
end
it 'does not remove the devise session if the active session could not be found' do
Gitlab::Redis::SharedState.with do |redis|
redis.set("session:gitlab:6919a6f1bb119dd7396fadc38fd18d0d", '')
end
other_user = create(:user)
ActiveSession.destroy(other_user, request.session.id)
Gitlab::Redis::SharedState.with do |redis|
expect(redis.scan_each(match: "session:gitlab:*").to_a).not_to be_empty
end
end
end
 
describe '.cleanup' do
before do
stub_const("ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS", 5)
end

it 'removes obsolete lookup entries' do
Gitlab::Redis::SharedState.with do |redis|
redis.set("session:user:gitlab:#{user.id}:6919a6f1bb119dd7396fadc38fd18d0d", '')
@@ -276,5 +266,47 @@ RSpec.describe ActiveSession, :clean_gitlab_redis_shared_state do
it 'does not bail if there are no lookup entries' do
ActiveSession.cleanup(user)
end

context 'cleaning up old sessions' do
let(:max_number_of_sessions_plus_one) { ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS + 1 }
let(:max_number_of_sessions_plus_two) { ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS + 2 }

before do
Gitlab::Redis::SharedState.with do |redis|
(1..max_number_of_sessions_plus_two).each do |number|
redis.set(
"session:user:gitlab:#{user.id}:#{number}",
Marshal.dump(ActiveSession.new(session_id: "#{number}", updated_at: number.days.ago))
)
redis.sadd(
"session:lookup:user:gitlab:#{user.id}",
"#{number}"
)
end
end
end

it 'removes obsolete active sessions entries' do
ActiveSession.cleanup(user)
Gitlab::Redis::SharedState.with do |redis|
sessions = redis.scan_each(match: "session:user:gitlab:#{user.id}:*").to_a
expect(sessions.count).to eq(ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS)
expect(sessions).not_to include("session:user:gitlab:#{user.id}:#{max_number_of_sessions_plus_one}", "session:user:gitlab:#{user.id}:#{max_number_of_sessions_plus_two}")
end
end

it 'removes obsolete lookup entries' do
ActiveSession.cleanup(user)
Gitlab::Redis::SharedState.with do |redis|
lookup_entries = redis.smembers("session:lookup:user:gitlab:#{user.id}")
expect(lookup_entries.count).to eq(ActiveSession::ALLOWED_NUMBER_OF_ACTIVE_SESSIONS)
expect(lookup_entries).not_to include(max_number_of_sessions_plus_one.to_s, max_number_of_sessions_plus_two.to_s)
end
end
end
end
end
@@ -53,6 +53,7 @@ describe 'projects/edit' do
render
 
expect(rendered).to have_content('Remove fork relationship')
expect(rendered).to have_link(source_project.full_name, href: project_path(source_project))
end
 
it 'hides the fork relationship settings from an unauthorized user' do
@@ -78,7 +79,7 @@ describe 'projects/edit' do
render
 
expect(rendered).to have_content('Remove fork relationship')
expect(rendered).to have_content(source_project.full_name)
expect(rendered).to have_link(source_project.full_name, href: project_path(source_project))
end
end
end