Commit d88843f3 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent d8c06be4
@@ -50,8 +50,8 @@ GEM
i18n (>= 0.7, < 2)
minitest (~> 5.1)
tzinfo (~> 1.1)
acts-as-taggable-on (6.0.0)
activerecord (~> 5.0)
acts-as-taggable-on (6.5.0)
activerecord (>= 5.0, < 6.1)
adamantium (0.2.0)
ice_nine (~> 0.11.0)
memoizable (~> 0.4.0)
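
The lockfile hunk above records both the bumped gem version (6.5.0) and the dependency constraint it declares on ActiveRecord (`>= 5.0, < 6.1`). As an illustrative sketch only (the project's actual `Gemfile` entry is not part of this diff), a corresponding application-side requirement could look like:

```ruby
# Illustrative Gemfile sketch -- not the entry from this repository.
# acts-as-taggable-on 6.5.0 itself declares activerecord ">= 5.0, < 6.1",
# which is what Gemfile.lock records above.
source 'https://rubygems.org'

gem 'acts-as-taggable-on', '~> 6.5' # resolves to 6.5.0 in the lockfile
```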
/* eslint-disable func-names, no-var */
/* eslint-disable func-names */
 
import $ from 'jquery';
import axios from '~/lib/utils/axios_utils';
@@ -12,11 +12,8 @@ import { __ } from '~/locale';
// more than `x` users are referenced.
//
 
var lastTextareaPreviewed;
var lastTextareaHeight = null;
var markdownPreview;
var previewButtonSelector;
var writeButtonSelector;
let lastTextareaHeight;
let lastTextareaPreviewed;
 
function MarkdownPreview() {}
 
@@ -27,14 +24,13 @@ MarkdownPreview.prototype.emptyMessage = __('Nothing to preview.');
MarkdownPreview.prototype.ajaxCache = {};
 
MarkdownPreview.prototype.showPreview = function($form) {
var mdText;
var preview = $form.find('.js-md-preview');
var url = preview.data('url');
const preview = $form.find('.js-md-preview');
const url = preview.data('url');
if (preview.hasClass('md-preview-loading')) {
return;
}
 
mdText = $form.find('textarea.markdown-area').val();
const mdText = $form.find('textarea.markdown-area').val();
 
if (mdText === undefined) {
return;
@@ -46,7 +42,7 @@ MarkdownPreview.prototype.showPreview = function($form) {
} else {
preview.addClass('md-preview-loading').text(__('Loading...'));
this.fetchMarkdownPreview(mdText, url, response => {
var body;
let body;
if (response.body.length > 0) {
({ body } = response);
} else {
@@ -91,8 +87,7 @@ MarkdownPreview.prototype.hideReferencedUsers = function($form) {
};
 
MarkdownPreview.prototype.renderReferencedUsers = function(users, $form) {
var referencedUsers;
referencedUsers = $form.find('.referenced-users');
const referencedUsers = $form.find('.referenced-users');
if (referencedUsers.length) {
if (users.length >= this.referenceThreshold) {
referencedUsers.show();
@@ -108,8 +103,7 @@ MarkdownPreview.prototype.hideReferencedCommands = function($form) {
};
 
MarkdownPreview.prototype.renderReferencedCommands = function(commands, $form) {
var referencedCommands;
referencedCommands = $form.find('.referenced-commands');
const referencedCommands = $form.find('.referenced-commands');
if (commands.length > 0) {
referencedCommands.html(commands);
referencedCommands.show();
@@ -119,15 +113,15 @@ MarkdownPreview.prototype.renderReferencedCommands = function(commands, $form) {
}
};
 
markdownPreview = new MarkdownPreview();
const markdownPreview = new MarkdownPreview();
 
previewButtonSelector = '.js-md-preview-button';
writeButtonSelector = '.js-md-write-button';
const previewButtonSelector = '.js-md-preview-button';
const writeButtonSelector = '.js-md-write-button';
lastTextareaPreviewed = null;
const markdownToolbar = $('.md-header-toolbar');
 
$.fn.setupMarkdownPreview = function() {
var $form = $(this);
const $form = $(this);
$form.find('textarea.markdown-area').on('input', () => {
markdownPreview.hideReferencedUsers($form);
});
@@ -188,7 +182,7 @@ $(document).on('markdown-preview:hide', (e, $form) => {
});
 
$(document).on('markdown-preview:toggle', (e, keyboardEvent) => {
var $target;
let $target;
$target = $(keyboardEvent.target);
if ($target.is('textarea.markdown-area')) {
$(document).triggerHandler('markdown-preview:show', [$target.closest('form')]);
@@ -201,16 +195,14 @@ $(document).on('markdown-preview:toggle', (e, keyboardEvent) => {
});
 
$(document).on('click', previewButtonSelector, function(e) {
var $form;
e.preventDefault();
$form = $(this).closest('form');
const $form = $(this).closest('form');
$(document).triggerHandler('markdown-preview:show', [$form]);
});
 
$(document).on('click', writeButtonSelector, function(e) {
var $form;
e.preventDefault();
$form = $(this).closest('form');
const $form = $(this).closest('form');
$(document).triggerHandler('markdown-preview:hide', [$form]);
});
 
/* eslint-disable func-names, one-var, no-var, no-else-return */
/* eslint-disable func-names, no-else-return */
 
import $ from 'jquery';
import { __ } from './locale';
@@ -8,9 +8,8 @@ import { capitalizeFirstCharacter } from './lib/utils/text_utility';
 
export default function initCompareAutocomplete(limitTo = null, clickHandler = () => {}) {
$('.js-compare-dropdown').each(function() {
var $dropdown, selected;
$dropdown = $(this);
selected = $dropdown.data('selected');
const $dropdown = $(this);
const selected = $dropdown.data('selected');
const $dropdownContainer = $dropdown.closest('.dropdown');
const $fieldInput = $(`input[name="${$dropdown.data('fieldName')}"]`, $dropdownContainer);
const $filterInput = $('input[type="search"]', $dropdownContainer);
@@ -44,17 +43,16 @@ export default function initCompareAutocomplete(limitTo = null, clickHandler = (
fieldName: $dropdown.data('fieldName'),
filterInput: 'input[type="search"]',
renderRow(ref) {
var link;
const link = $('<a />')
.attr('href', '#')
.addClass(ref === selected ? 'is-active' : '')
.text(ref)
.attr('data-ref', ref);
if (ref.header != null) {
return $('<li />')
.addClass('dropdown-header')
.text(ref.header);
} else {
link = $('<a />')
.attr('href', '#')
.addClass(ref === selected ? 'is-active' : '')
.text(ref)
.attr('data-ref', ref);
return $('<li />').append(link);
}
},
/* eslint-disable no-var, no-return-assign */
/* eslint-disable no-return-assign */
export default class NewCommitForm {
constructor(form) {
this.form = form;
@@ -11,8 +11,7 @@ export default class NewCommitForm {
this.renderDestination();
}
renderDestination() {
var different;
different = this.branchName.val() !== this.originalBranch.val();
const different = this.branchName.val() !== this.originalBranch.val();
if (different) {
this.createMergeRequestContainer.show();
if (!this.wasDifferent) {
---
title: Remove var from new_commit_form.js
merge_request: 20095
author: Lee Tickett
type: other
---
title: Remove var from preview_markdown.js
merge_request: 20115
author: Lee Tickett
type: other
@@ -559,6 +559,9 @@ a few things that you need to do:
including [incremental logging](../job_logs.md#new-incremental-logging-architecture).
1. Configure [object storage for LFS objects](../lfs/lfs_administration.md#storing-lfs-objects-in-remote-object-storage).
1. Configure [object storage for uploads](../uploads.md#using-object-storage-core-only).
1. Configure [object storage for Merge Request Diffs](../merge_request_diffs.md#using-object-storage).
1. Configure [object storage for Packages](../packages/index.md#using-object-storage) (Optional Feature).
1. Configure [object storage for Dependency Proxy](../packages/dependency_proxy.md#using-object-storage) (Optional Feature).
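
Each of the object storage steps above follows the same Omnibus pattern: enable object storage for the data type in `/etc/gitlab/gitlab.rb` and point it at a bucket. A minimal sketch for LFS objects, using placeholder bucket and credential values (the exact key names for the other data types vary; see the linked pages):

```ruby
# /etc/gitlab/gitlab.rb -- illustrative sketch with placeholder values.
gitlab_rails['lfs_object_store_enabled'] = true
gitlab_rails['lfs_object_store_remote_directory'] = 'gitlab-lfs' # placeholder bucket
gitlab_rails['lfs_object_store_connection'] = {
  'provider' => 'AWS',
  'region' => 'us-east-1',
  'aws_access_key_id' => 'AWS_ACCESS_KEY_ID',         # placeholder
  'aws_secret_access_key' => 'AWS_SECRET_ACCESS_KEY'  # placeholder
}
```

Run `gitlab-ctl reconfigure` after editing the file for the change to take effect.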
 
NOTE: **Note:**
One current feature of GitLab that still requires a shared directory (NFS) is
@@ -38,14 +38,17 @@ The following components need to be considered for a scaled or highly-available
environment. In many cases, components can be combined on the same nodes to reduce
complexity.
 
- Unicorn/Workhorse - Web-requests (UI, API, Git over HTTP)
- GitLab application nodes (Unicorn / Puma, Workhorse) - Web-requests (UI, API, Git over HTTP)
- Sidekiq - Asynchronous/Background jobs
- PostgreSQL - Database
- Consul - Database service discovery and health checks/failover
- PgBouncer - Database pool manager
- Redis - Key/Value store (User sessions, cache, queue for Sidekiq)
- Sentinel - Redis health check/failover manager
- Gitaly - Provides high-level RPC access to Git repositories
- Gitaly - Provides high-level storage and RPC access to Git repositories
- S3 Object Storage service[^3] and / or NFS storage servers[^4] for entities such as Uploads, Artifacts, LFS Objects, etc...
- Load Balancer[^2] - Main entry point and handles load balancing for the GitLab application nodes.
- Monitor - Prometheus and Grafana monitoring with auto discovery.
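
In an Omnibus-based deployment, splitting these components across nodes is typically done with role settings in each node's `/etc/gitlab/gitlab.rb`. A rough sketch, assuming the standard Omnibus role names (check the documentation for the exact role set available in your version):

```ruby
# /etc/gitlab/gitlab.rb -- illustrative per-node role assignments.
# Each line below belongs in the gitlab.rb of a different node.

roles ['postgres_role']      # dedicated PostgreSQL node
roles ['redis_master_role']  # dedicated Redis node
roles ['consul_role']        # Consul service discovery node
roles ['application_role']   # GitLab application node (Unicorn / Puma, Workhorse, Sidekiq)
roles ['monitoring_role']    # Prometheus / Grafana monitoring node
```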
 
## Scalable Architecture Examples
 
@@ -67,8 +70,10 @@ larger one.
 
- 1 PostgreSQL node
- 1 Redis node
- 1 NFS/Gitaly storage server
- 2 or more GitLab application nodes (Unicorn, Workhorse, Sidekiq)
- 1 Gitaly node
- 1 or more Object Storage services[^3] and / or NFS storage server[^4]
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 or more Load Balancer nodes[^2]
- 1 Monitoring node (Prometheus, Grafana)
 
#### Installation Instructions
@@ -79,8 +84,10 @@ you can continue with the next step.
 
1. [PostgreSQL](database.md#postgresql-in-a-scaled-environment)
1. [Redis](redis.md#redis-in-a-scaled-environment)
1. [Gitaly](gitaly.md) (recommended) or [NFS](nfs.md)
1. [Gitaly](gitaly.md) (recommended) and / or [NFS](nfs.md)[^4]
1. [GitLab application nodes](gitlab.md)
- With [Object Storage service enabled](../gitaly/index.md#eliminating-nfs-altogether)[^3]
1. [Load Balancer](load_balancer.md)[^2]
1. [Monitoring node (Prometheus and Grafana)](monitoring_node.md)
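
Once the backing services are in place, the GitLab application nodes are pointed at them from `/etc/gitlab/gitlab.rb`. A rough sketch with placeholder addresses (PgBouncer, Sentinel, TLS, and secret handling are covered in the linked pages):

```ruby
# /etc/gitlab/gitlab.rb on an application node -- placeholder values only.
external_url 'https://gitlab.example.com'

# External PostgreSQL and Redis:
gitlab_rails['db_host'] = '10.0.0.10'
gitlab_rails['db_password'] = 'DB_PASSWORD'
gitlab_rails['redis_host'] = '10.0.0.20'

# Serve repositories from a separate Gitaly node instead of local disk:
git_data_dirs({
  'default' => { 'gitaly_address' => 'tcp://10.0.0.30:8075' },
})

# Disable the bundled services that run on their own nodes:
postgresql['enable'] = false
redis['enable'] = false
```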
 
### Full Scaling
@@ -91,11 +98,13 @@ is split into separate Sidekiq and Unicorn/Workhorse nodes. One indication that
this architecture is required is if Sidekiq queues begin to periodically increase
in size, indicating that there is contention or there are not enough resources.
 
- 1 PostgreSQL node
- 1 Redis node
- 2 or more NFS/Gitaly storage servers
- 1 or more PostgreSQL nodes
- 1 or more Redis nodes
- 1 or more Gitaly storage servers
- 1 or more Object Storage services[^3] and / or NFS storage server[^4]
- 2 or more Sidekiq nodes
- 2 or more GitLab application nodes (Unicorn, Workhorse)
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 or more Load Balancer nodes[^2]
- 1 Monitoring node (Prometheus, Grafana)
 
## High Availability Architecture Examples
@@ -114,10 +123,10 @@ This may lead to the other nodes believing a failure has occurred and initiating
automated failover. Isolating Redis and Consul from the services they monitor
reduces the chances of a false positive that a failure has occurred.
 
The examples below do not really address high availability of NFS. Some enterprises
have access to NFS appliances that manage availability. This is the best case
scenario. In the future, GitLab may offer a more user-friendly solution to
[GitLab HA Storage](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/2472).
The examples below do not address high availability of NFS for objects. We recommend an
S3 Object Storage service[^3] be used over NFS where possible, but NFS is still required in
certain cases[^4]. Where NFS is to be used, some enterprises have access to NFS appliances
that manage availability; this is the best case scenario.
 
There are many options in between each of these examples. Work with GitLab Support
to understand the best starting point for your workload and adapt from there.
@@ -138,8 +147,10 @@ the contention.
- 3 PostgreSQL nodes
- 2 Redis nodes
- 3 Consul/Sentinel nodes
- 2 or more GitLab application nodes (Unicorn, Workhorse, Sidekiq, PgBouncer)
- 1 NFS/Gitaly server
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 Gitaly storage server
- 1 Object Storage service[^3] and / or NFS storage server[^4]
- 1 or more Load Balancer nodes[^2]
- 1 Monitoring node (Prometheus, Grafana)
 
![Horizontal architecture diagram](img/horizontal.png)
@@ -156,8 +167,10 @@ contention due to certain workloads.
- 2 Redis nodes
- 3 Consul/Sentinel nodes
- 2 or more Sidekiq nodes
- 2 or more GitLab application nodes (Unicorn, Workhorse)
- 1 or more NFS/Gitaly servers
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 Gitaly storage server
- 1 Object Storage service[^3] and / or NFS storage server[^4]
- 1 or more Load Balancer nodes[^2]
- 1 Monitoring node (Prometheus, Grafana)
 
![Hybrid architecture diagram](img/hybrid.png)
@@ -177,45 +190,40 @@ with the added complexity of many more nodes to configure, manage, and monitor.
- 2 or more Git nodes (Git over SSH/Git over HTTP)
- 2 or more API nodes (All requests to `/api`)
- 2 or more Web nodes (All other web requests)
- 2 or more NFS/Gitaly servers
- 2 or more Gitaly storage servers
- 1 or more Object Storage services[^3] and / or NFS storage servers[^4]
- 1 or more Load Balancer nodes[^2]
- 1 Monitoring node (Prometheus, Grafana)
 
![Fully Distributed architecture diagram](img/fully-distributed.png)
 
The following pages outline the steps necessary to configure each component
separately:
## Reference Architecture Examples
 
1. [Configure the database](database.md)
1. [Configure Redis](redis.md)
1. [Configure Redis for GitLab source installations](redis_source.md)
1. [Configure NFS](nfs.md)
1. [NFS Client and Host setup](nfs_host_client_setup.md)
1. [Configure the GitLab application servers](gitlab.md)
1. [Configure the load balancers](load_balancer.md)
1. [Monitoring node (Prometheus and Grafana)](monitoring_node.md)
The Support and Quality teams build, performance test, and validate Reference
Architectures that support a set number of users. The specifications below are a
representation of this work so far and may be adjusted in the future based on
additional testing and iteration.
 
## Reference Architecture Examples
The architectures have been tested with specific coded workloads. The throughputs
used for testing are calculated based on sample customer data. We test each endpoint
type with the following number of requests per second (RPS) per 1000 users:
 
These reference architecture examples rely on the general rule that approximately 2 requests per second (RPS) of load is generated for every 100 users.
- API: 20 RPS
- Web: 2 RPS
- Git: 2 RPS
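
Scaling those per-1000-user rates to a target user count yields the test RPS rates quoted in the configurations below; a quick sketch of the arithmetic:

```ruby
# Per-1000-user test rates from the list above, scaled to a target user count.
RATES_PER_1000_USERS = { api: 20, web: 2, git: 2 }.freeze

def test_rps(user_count)
  RATES_PER_1000_USERS.transform_values { |rps| rps * user_count / 1000 }
end

test_rps(10_000) # => {:api=>200, :web=>20, :git=>20}
test_rps(25_000) # => {:api=>500, :web=>50, :git=>50}
test_rps(50_000) # => {:api=>1000, :web=>100, :git=>100}
```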
 
The specifications here were performance tested against a specific coded
workload. Your exact needs may be more, depending on your workload. Your
Note that your exact needs may be more, depending on your workload. Your
workload is influenced by factors such as - but not limited to - how active your
users are, how much automation you use, mirroring, and repo/change size.
 
### 10,000 User Configuration
 
- **Supported Users (approximate):** 10,000
- **RPS:** 200 requests per second
- **Test RPS Rates:** API: 200 RPS, Web: 20 RPS, Git: 20 RPS
- **Known Issues:** While validating the reference architecture, slow API endpoints
were discovered. For details, see the related issues list in
[this issue](https://gitlab.com/gitlab-org/gitlab-foss/issues/64335).
 
The Support and Quality teams built, performance tested, and validated an
environment that supports about 10,000 users. The specifications below are a
representation of the work so far. The specifications may be adjusted in the
future based on additional testing and iteration.
| Service | Configuration | GCP type |
| ------------------------------|-------------------------|----------------|
| 3 GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
@@ -226,30 +234,23 @@ future based on additional testing and iteration.
| 3 Redis Persistent + Sentinel | 4 vCPU, 15GB Memory | n1-standard-4 |
| 4 Sidekiq | 4 vCPU, 15GB Memory | n1-standard-4 |
| 3 Consul | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| 1 NFS Server | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| 1 NFS Server | 4 CPU, 3.6GB Memory | n1-highcpu-4 |
| X S3 Object Storage[^3] | - | - |
| 1 Monitoring node | 4 CPU, 3.6GB Memory | n1-highcpu-4 |
| 1 Load Balancing node[^2] . | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| 1 Load Balancing node[^2] | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors, a best-effort like-for-like match can be used.
 
### 25,000 User Configuration
 
- **Supported Users (approximate):** 25,000
- **RPS:** 500 requests per second
- **Test RPS Rates:** API: 500 RPS, Web: 50 RPS, Git: 50 RPS
- **Known Issues:** The slow API endpoints that were discovered during testing
the 10,000 user architecture also affect the 25,000 user architecture. For
details, see the related issues list in
[this issue](https://gitlab.com/gitlab-org/gitlab-foss/issues/64335).
 
The GitLab Support and Quality teams built, performance tested, and validated an
environment that supports around 25,000 users. The specifications below are a
representation of the work so far. The specifications may be adjusted in the
future based on additional testing and iteration.
NOTE: **Note:** The specifications here were performance tested against a
specific coded workload. Your exact needs may be more, depending on your
workload. Your workload is influenced by factors such as - but not limited to -
how active your users are, how much automation you use, mirroring, and
repo/change size.
| Service | Configuration | GCP type |
| ------------------------------|-------------------------|----------------|
| 7 GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
@@ -260,23 +261,24 @@ repo/change size.
| 3 Redis Persistent + Sentinel | 4 vCPU, 15GB Memory | n1-standard-4 |
| 4 Sidekiq | 4 vCPU, 15GB Memory | n1-standard-4 |
| 3 Consul | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| 1 NFS Server | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| 1 NFS Server | 4 CPU, 3.6GB Memory | n1-highcpu-4 |
| X S3 Object Storage[^3] | - | - |
| 1 Monitoring node | 4 CPU, 3.6GB Memory | n1-highcpu-4 |
| 1 Load Balancing node[^2] . | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| 1 Load Balancing node[^2] | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors, a best-effort like-for-like match can be used.
 
### 50,000 User Configuration
 
- **Supported Users (approximate):** 50,000
- **RPS:** 1,000 requests per second
- **Test RPS Rates:** API: 1000 RPS, Web: 100 RPS, Git: 100 RPS
- **Status:** Work-in-progress
- **Related Issue:** See the [related issue](https://gitlab.com/gitlab-org/quality/performance/issues/66) for more information.
 
The Support and Quality teams are in the process of building and performance
testing an environment that will support around 50,000 users. The specifications
below are a very rough work-in-progress representation of the work so far. The
Quality team will be certifying this environment in late 2019. The
specifications may be adjusted prior to certification based on performance
testing.
NOTE: **Note:** This architecture is a work in progress. The
Quality team will be certifying this environment in late 2019. The specifications
may be adjusted prior to certification based on performance testing.
 
| Service | Configuration | GCP type |
| ------------------------------|-------------------------|----------------|
@@ -288,9 +290,13 @@ testing.
| 3 Redis Persistent + Sentinel | 4 vCPU, 15GB Memory | n1-standard-4 |
| 4 Sidekiq | 4 vCPU, 15GB Memory | n1-standard-4 |
| 3 Consul | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| 1 NFS Server | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| 1 NFS Server | 4 CPU, 3.6GB Memory | n1-highcpu-4 |
| X S3 Object Storage[^3] | - | - |
| 1 Monitoring node | 4 CPU, 3.6GB Memory | n1-highcpu-4 |
| 1 Load Balancing node[^2] . | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| 1 Load Balancing node[^2] | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors, a best-effort like-for-like match can be used.
 
[^1]: Gitaly node requirements are dependent on customer data. We recommend 2
nodes as an absolute minimum for performance at the 10,000 and 25,000 user
@@ -298,5 +304,19 @@ testing.
additional nodes should be considered in conjunction with a review of
project counts and sizes.
 
[^2]: HAProxy is the only tested and recommended load balancer. Additional
options may be supported in the future.
[^2]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
as the load balancer. However, other reputable load balancers with similar feature sets
should also work here; be aware that these have not been validated.
[^3]: For data objects such as LFS, Uploads, and Artifacts, we recommend an S3 Object Storage
service over NFS where possible, due to better performance and availability. Several types of objects
are supported for S3 storage: [Job artifacts](../job_artifacts.md#using-object-storage),
[LFS](../lfs/lfs_administration.md#storing-lfs-objects-in-remote-object-storage),
[Uploads](../uploads.md#using-object-storage-core-only),
[Merge Request Diffs](../merge_request_diffs.md#using-object-storage),
[Packages](../packages/index.md#using-object-storage) (Optional Feature),
[Dependency Proxy](../packages/dependency_proxy.md#using-object-storage) (Optional Feature).
[^4]: An NFS storage server is still required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196)
and optionally for CI Job Incremental Logging
([can be switched to use Redis instead](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture)).
@@ -66,24 +66,22 @@ module QA
expect(page).to have_content(commit_message_of_second_branch)
expect(page).to have_content(commit_message_of_third_branch)
 
Page::Project::Branches::Show.perform do |branches|
expect(branches).to have_branch_with_badge(second_branch, 'merged')
end
Page::Project::Branches::Show.perform do |branches_page|
expect(branches_page).to have_branch_with_badge(second_branch, 'merged')
 
Page::Project::Branches::Show.perform do |branches_view|
branches_view.delete_branch(third_branch)
expect(branches_view).to have_no_branch(third_branch)
end
branches_page.delete_branch(third_branch)
expect(branches_page).to have_no_branch(third_branch)
branches_page.delete_merged_branches
 
Page::Project::Branches::Show.perform(&:delete_merged_branches)
expect(branches_page).to have_content(
'Merged branches are being deleted. This can take some time depending on the number of branches. Please refresh the page to see changes.'
)
 
expect(page).to have_content(
'Merged branches are being deleted. This can take some time depending on the number of branches. Please refresh the page to see changes.'
)
branches_page.refresh
 
page.refresh
Page::Project::Branches::Show.perform do |branches_view|
expect(branches_view).to have_no_branch(second_branch, reload: true)
expect(branches_page).to have_no_branch(second_branch, reload: true)
end
end
end