Commit 3fe9588b authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent ad8eea38

Showing with 1073 additions and 47 deletions
@@ -62,6 +62,8 @@ constraints(::Constraints::GroupUrlConstrainer.new) do
delete :leave, on: :collection
end
 
resources :group_links, only: [:index, :create, :update, :destroy], constraints: { id: /\d+/ }
resources :uploads, only: [:create] do
collection do
get ":secret/:filename", action: :show, as: :show, constraints: { filename: %r{[^/]+} }
# frozen_string_literal: true
class CreateGroupGroupLinks < ActiveRecord::Migration[5.2]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
def up
create_table :group_group_links do |t|
t.timestamps_with_timezone null: false
t.references :shared_group, null: false,
index: false,
foreign_key: { on_delete: :cascade,
to_table: :namespaces }
t.references :shared_with_group, null: false,
foreign_key: { on_delete: :cascade,
to_table: :namespaces }
t.date :expires_at
t.index [:shared_group_id, :shared_with_group_id],
{ unique: true,
name: 'index_group_group_links_on_shared_group_and_shared_with_group' }
t.integer :group_access, { limit: 2,
default: 30, # Gitlab::Access::DEVELOPER
null: false }
end
end
def down
drop_table :group_group_links
end
end
# frozen_string_literal: true
class AddMarkForDeletionToProjects < ActiveRecord::Migration[5.2]
# Set this constant to true if this migration requires downtime.
DOWNTIME = false
def change
add_column :projects, :marked_for_deletion_at, :date
add_column :projects, :marked_for_deletion_by_user_id, :integer
end
end
# frozen_string_literal: true
class AddMarkForDeletionIndexesToProjects < ActiveRecord::Migration[5.2]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
add_concurrent_foreign_key :projects, :users, column: :marked_for_deletion_by_user_id, on_delete: :nullify
add_concurrent_index :projects, :marked_for_deletion_by_user_id, where: 'marked_for_deletion_by_user_id IS NOT NULL'
end
def down
remove_foreign_key_if_exists :projects, column: :marked_for_deletion_by_user_id
remove_concurrent_index :projects, :marked_for_deletion_by_user_id
end
end
# frozen_string_literal: true
class AddProjectDeletionAdjournedPeriodToApplicationSettings < ActiveRecord::Migration[5.2]
DOWNTIME = false
DEFAULT_NUMBER_OF_DAYS_BEFORE_REMOVAL = 7
def change
add_column :application_settings, :deletion_adjourned_period, :integer, default: DEFAULT_NUMBER_OF_DAYS_BEFORE_REMOVAL, null: false
end
end
# frozen_string_literal: true
# See http://doc.gitlab.com/ce/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class ScheduleEpicIssuesAfterEpicsMove < ActiveRecord::Migration[5.2]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
INTERVAL = 5.minutes.to_i
BATCH_SIZE = 100
MIGRATION = 'MoveEpicIssuesAfterEpics'
disable_ddl_transaction!
class Epic < ActiveRecord::Base
self.table_name = 'epics'
include ::EachBatch
end
def up
return unless ::Gitlab.ee?
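# Schedule one background migration per batch of epic IDs, staggered by INTERVAL seconds.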
Epic.each_batch(of: BATCH_SIZE) do |batch, index|
range = batch.pluck('MIN(id)', 'MAX(id)').first
delay = index * INTERVAL
BackgroundMigrationWorker.perform_in(delay, MIGRATION, *range)
end
end
def down
# No-op. There is no need to revert the scheduled background migrations.
end
end
@@ -343,6 +343,7 @@ ActiveRecord::Schema.define(version: 2019_10_26_124116) do
t.string "custom_http_clone_url_root", limit: 511
t.boolean "pendo_enabled", default: false, null: false
t.string "pendo_url", limit: 255
t.integer "deletion_adjourned_period", default: 7, null: false
t.index ["custom_project_templates_group_id"], name: "index_application_settings_on_custom_project_templates_group_id"
t.index ["file_template_project_id"], name: "index_application_settings_on_file_template_project_id"
t.index ["instance_administration_project_id"], name: "index_applicationsettings_on_instance_administration_project_id"
@@ -1820,6 +1821,17 @@ ActiveRecord::Schema.define(version: 2019_10_26_124116) do
t.index ["key", "value"], name: "index_group_custom_attributes_on_key_and_value"
end
 
create_table "group_group_links", force: :cascade do |t|
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
t.bigint "shared_group_id", null: false
t.bigint "shared_with_group_id", null: false
t.date "expires_at"
t.integer "group_access", limit: 2, default: 30, null: false
t.index ["shared_group_id", "shared_with_group_id"], name: "index_group_group_links_on_shared_group_and_shared_with_group", unique: true
t.index ["shared_with_group_id"], name: "index_group_group_links_on_shared_with_group_id"
end
create_table "historical_data", id: :serial, force: :cascade do |t|
t.date "date", null: false
t.integer "active_user_count"
@@ -3062,6 +3074,8 @@ ActiveRecord::Schema.define(version: 2019_10_26_124116) do
t.integer "max_artifacts_size"
t.string "pull_mirror_branch_prefix", limit: 50
t.boolean "remove_source_branch_after_merge"
t.date "marked_for_deletion_at"
t.integer "marked_for_deletion_by_user_id"
t.index "lower((name)::text)", name: "index_projects_on_lower_name"
t.index ["archived", "pending_delete", "merge_requests_require_code_owner_approval"], name: "projects_requiring_code_owner_approval", where: "((pending_delete = false) AND (archived = false) AND (merge_requests_require_code_owner_approval = true))"
t.index ["created_at"], name: "index_projects_on_created_at"
@@ -3074,6 +3088,7 @@ ActiveRecord::Schema.define(version: 2019_10_26_124116) do
t.index ["last_repository_check_at"], name: "index_projects_on_last_repository_check_at", where: "(last_repository_check_at IS NOT NULL)"
t.index ["last_repository_check_failed"], name: "index_projects_on_last_repository_check_failed"
t.index ["last_repository_updated_at"], name: "index_projects_on_last_repository_updated_at"
t.index ["marked_for_deletion_by_user_id"], name: "index_projects_on_marked_for_deletion_by_user_id", where: "(marked_for_deletion_by_user_id IS NOT NULL)"
t.index ["mirror_last_successful_update_at"], name: "index_projects_on_mirror_last_successful_update_at"
t.index ["mirror_user_id"], name: "index_projects_on_mirror_user_id"
t.index ["name"], name: "index_projects_on_name_trigram", opclass: :gin_trgm_ops, using: :gin
@@ -4220,6 +4235,8 @@ ActiveRecord::Schema.define(version: 2019_10_26_124116) do
add_foreign_key "gpg_signatures", "projects", on_delete: :cascade
add_foreign_key "grafana_integrations", "projects", on_delete: :cascade
add_foreign_key "group_custom_attributes", "namespaces", column: "group_id", on_delete: :cascade
add_foreign_key "group_group_links", "namespaces", column: "shared_group_id", on_delete: :cascade
add_foreign_key "group_group_links", "namespaces", column: "shared_with_group_id", on_delete: :cascade
add_foreign_key "identities", "saml_providers", name: "fk_aade90f0fc", on_delete: :cascade
add_foreign_key "import_export_uploads", "projects", on_delete: :cascade
add_foreign_key "index_statuses", "projects", name: "fk_74b2492545", on_delete: :cascade
@@ -4343,6 +4360,7 @@ ActiveRecord::Schema.define(version: 2019_10_26_124116) do
add_foreign_key "project_statistics", "projects", on_delete: :cascade
add_foreign_key "project_tracing_settings", "projects", on_delete: :cascade
add_foreign_key "projects", "pool_repositories", name: "fk_6e5c14658a", on_delete: :nullify
add_foreign_key "projects", "users", column: "marked_for_deletion_by_user_id", name: "fk_25d8780d11", on_delete: :nullify
add_foreign_key "prometheus_alert_events", "projects", on_delete: :cascade
add_foreign_key "prometheus_alert_events", "prometheus_alerts", on_delete: :cascade
add_foreign_key "prometheus_alerts", "environments", on_delete: :cascade
@@ -368,7 +368,7 @@ Enterprise Edition instance. This has some implications:
- [Background migrations](background_migrations.md) run in Sidekiq, and
should only be done for migrations that would take an extreme amount of
time at GitLab.com scale.
1. **Sidekiq workers** [cannot change in a backwards-incompatible way](sidekiq_style_guide.md#removing-or-renaming-queues):
1. **Sidekiq workers** [cannot change in a backwards-incompatible way](sidekiq_style_guide.md#sidekiq-compatibility-across-updates):
1. Sidekiq queues are not drained before a deploy happens, so there will be
workers in the queue from the previous version of GitLab.
1. If you need to change a method signature, try to do so across two releases,
@@ -61,6 +61,168 @@ the extra jobs will take resources away from jobs from workers that were already
there, if the resources available to the Sidekiq process handling the namespace
are not adjusted appropriately.
 
## Latency Sensitive Jobs
If a large number of background jobs get scheduled at once, queueing of jobs may
occur while jobs wait for a worker node to become available. This is normal
and gives the system resilience by allowing it to gracefully handle spikes in
traffic. Some jobs, however, are more sensitive to latency than others. Examples
of these jobs include:
1. A job which updates a merge request following a push to a branch.
1. A job which invalidates a cache of known branches for a project after a push
to the branch.
1. A job which recalculates the groups and projects a user can see after a
change in permissions.
1. A job which updates the status of a CI pipeline after a state change to a job
in the pipeline.
When these jobs are delayed, the user may perceive the delay as a bug: for
example, they may push a branch and then attempt to create a merge request for
that branch, but be told in the UI that the branch does not exist. We deem these
jobs to be `latency_sensitive`.
Extra effort is made to ensure that these jobs are started within a very short
period of time after being scheduled. However, in order to ensure throughput,
these jobs also have very strict execution duration requirements:
1. The median job execution time should be less than 1 second.
1. 99% of jobs should complete within 10 seconds.
If a worker cannot meet these expectations, then it cannot be treated as a
`latency_sensitive` worker: consider redesigning the worker, or splitting the
work between two different workers, one with `latency_sensitive` code that
executes quickly, and the other with non-`latency_sensitive`, which has no
execution latency requirements (but also has lower scheduling targets).
This can be summed up in the following table:
| **Latency Sensitivity** | **Queue Scheduling Target** | **Execution Latency Requirement** |
|-------------------------|-----------------------------|-------------------------------------|
| Not `latency_sensitive` | 1 minute | Maximum run time of 1 hour |
| `latency_sensitive` | 100 milliseconds | p50 of 1 second, p99 of 10 seconds |
To mark a worker as being `latency_sensitive`, use the
`latency_sensitive_worker!` attribute, as shown in this example:
```ruby
class LatencySensitiveWorker
include ApplicationWorker
latency_sensitive_worker!
# ...
end
```
## Jobs with External Dependencies
Most background jobs in the GitLab application communicate with other GitLab
services, e.g. Postgres, Redis, Gitaly, and Object Storage. These are considered
to be "internal" dependencies for a job.
However, some jobs will be dependent on external services in order to complete
successfully. Some examples include:
1. Jobs which call web-hooks configured by a user.
1. Jobs which deploy an application to a k8s cluster configured by a user.
These jobs have "external dependencies". This is important for the operation of
the background processing cluster in several ways:
1. Most external dependencies (such as web-hooks) do not provide SLOs, and
therefore we cannot guarantee the execution latencies on these jobs. Since we
cannot guarantee execution latency, we cannot ensure throughput and
therefore, in high-traffic environments, we need to ensure that jobs with
external dependencies are separated from `latency_sensitive` jobs, to ensure
throughput on those queues.
1. Errors in jobs with external dependencies have higher alerting thresholds as
there is a likelihood that the cause of the error is external.
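To mark a worker as having external dependencies, use the
`worker_has_external_dependencies!` attribute, as shown in this example: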
```ruby
class ExternalDependencyWorker
include ApplicationWorker
# Declares that this worker depends on
# third-party, external services in order
# to complete successfully
worker_has_external_dependencies!
# ...
end
```
NOTE: **Note:** A job cannot both be latency sensitive and have external
dependencies.
## CPU-bound and Memory-bound Workers
Workers that are constrained by CPU or memory resource limitations should be
annotated with the `worker_resource_boundary` method.
Most workers tend to spend most of their time blocked, waiting on network responses
from other services such as Redis, Postgres, and Gitaly. Since Sidekiq is a
multithreaded environment, these jobs can be scheduled with high concurrency.
Some workers, however, spend large amounts of time _on-cpu_ running logic in
Ruby. Ruby MRI does not support true multithreading - it relies on the
[GIL](https://thoughtbot.com/blog/untangling-ruby-threads#the-global-interpreter-lock)
to greatly simplify application development by only allowing one section of Ruby
code in a process to run at a time, no matter how many cores the machine
hosting the process has. For IO bound workers, this is not a problem, since most
of the threads are blocked in underlying libraries (which are outside of the
GIL).
If many threads are attempting to run Ruby code simultaneously, this will lead
to contention on the GIL, which will have the effect of slowing down all
processes.
In high-traffic environments, knowing that a worker is CPU-bound allows us to
run it on a different fleet with lower concurrency. This ensures optimal
performance.
Likewise, if a worker uses large amounts of memory, we can run these on a
bespoke low concurrency, high memory fleet.
Note that memory-bound workers create heavy GC workloads, with pauses of
10-50ms. This will have an impact on the latency requirements for the
worker. For this reason, `memory` bound, `latency_sensitive` jobs are not
permitted and will fail CI. In general, `memory` bound workers are
discouraged, and alternative approaches to processing the work should be
considered.
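If a memory-bound worker is unavoidable, it can be annotated in the same way (a
minimal sketch, assuming `:memory` is an accepted value for
`worker_resource_boundary`, by symmetry with the `:cpu` declaration below):

```ruby
class MemoryIntensiveWorker
  include ApplicationWorker

  # Declares that this worker will allocate large amounts
  # of memory (:memory assumed by symmetry with :cpu).
  worker_resource_boundary :memory

  # ...
end
```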
## Declaring a Job as CPU-bound
This example shows how to declare a job as being CPU-bound.
```ruby
class CPUIntensiveWorker
include ApplicationWorker
# Declares that this worker will perform a lot of
# calculations on-CPU.
worker_resource_boundary :cpu
# ...
end
```
## Determining whether a worker is CPU-bound
We use the following approach to determine whether a worker is CPU-bound:
- In the sidekiq structured JSON logs, aggregate the worker `duration` and
`cpu_s` fields.
- `duration` refers to the total job execution duration, in seconds
- `cpu_s` is derived from the
[`Process::CLOCK_THREAD_CPUTIME_ID`](https://www.rubydoc.info/stdlib/core/Process:clock_gettime)
counter, and is a measure of time spent by the job on-CPU.
- Divide `cpu_s` by `duration` to get the percentage of time spent on-CPU (see the sketch below).
- If this ratio exceeds 33%, the worker is considered CPU-bound and should be
annotated as such.
- Note that these values should not be used over small sample sizes, but
rather over fairly large aggregates.
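As an illustration, the ratio could be computed over a batch of log lines like
this (a minimal sketch, assuming one JSON object per line with numeric
`duration` and `cpu_s` fields):

```ruby
require 'json'

# Sum job durations and on-CPU time across all log lines, then report
# the aggregate on-CPU percentage. Workers above roughly 33% are
# candidates for `worker_resource_boundary :cpu`.
total_duration = 0.0
total_cpu = 0.0

ARGF.each_line do |line|
  job = JSON.parse(line)
  next unless job['duration'] && job['cpu_s']

  total_duration += job['duration'].to_f
  total_cpu += job['cpu_s'].to_f
end

abort 'no samples found' if total_duration.zero?
puts format('on-CPU: %.1f%%', 100 * total_cpu / total_duration)
```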
## Feature Categorization
 
Each Sidekiq worker, or one of its ancestor classes, must declare a
@@ -74,7 +236,7 @@ The declaration uses the `feature_category` class method, as shown below.
class SomeScheduledTaskWorker
include ApplicationWorker
 
# Declares that this feature is part of the
# Declares that this worker is part of the
# `continuous_integration` feature category
feature_category :continuous_integration
 
@@ -88,11 +250,11 @@ source](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml
 
### Updating `config/feature_categories.yml`
 
Occassionally new features will be added to GitLab stages. When this occurs, you
Occasionally new features will be added to GitLab stages. When this occurs, you
can automatically update `config/feature_categories.yml` by running
`scripts/update-feature-categories`. This script will fetch and parse
[`stages.yml`](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml)
and generare a new version of the file, which needs to be checked into source control.
and generate a new version of the file, which needs to be checked into source control.
 
### Excluding Sidekiq workers from feature categorization
 
@@ -116,9 +278,63 @@ end
Each Sidekiq worker must be tested using RSpec, just like any other class. These
tests should be placed in `spec/workers`.
 
## Removing or renaming queues
## Sidekiq Compatibility across Updates
Keep in mind that the arguments for a Sidekiq job are stored in a queue while it
is scheduled for execution. During an online update, this could lead to several
possible situations:
1. An older version of the application publishes a job, which is executed by an
upgraded Sidekiq node.
1. A job is queued before an upgrade, but executed after an upgrade.
1. A job is queued by a node running the newer version of the application, but
executed on a node running an older version of the application.
### Changing the arguments for a worker
Jobs need to be backwards- and forwards-compatible between consecutive versions
of the application.
This can be done by following this process:
1. **Do not remove arguments from the `perform` function.** Instead, use the
following approach, as sketched in the example below:
1. Provide a default value (usually `nil`) and use a comment to mark the
argument as deprecated.
1. Stop using the argument in `perform_async`.
1. Ignore the value in the worker class, but do not remove it until the next
major release.
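For example (a minimal sketch; the worker and argument names are hypothetical):

```ruby
class SomeWorker
  include ApplicationWorker

  # `arg_b` is deprecated: callers no longer pass it, but the default
  # keeps jobs queued by the previous version working.
  # Remove it in the next major release.
  def perform(arg_a, arg_b = nil)
    # `arg_b` is intentionally ignored.
  end
end
```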
### Removing workers
Try to avoid removing workers and their queues in minor and patch
releases.
 
Try to avoid renaming or removing workers and their queues in minor and patch releases.
During an online update, the instance can have pending jobs and removing the queue can
lead to those jobs being stuck forever. If you can't write a migration for those
Sidekiq jobs, please consider doing rename or remove queue in major release only.
Sidekiq jobs, please consider removing the worker in a major release only.
### Renaming queues
For the same reasons that removing workers is dangerous, care should be taken
when renaming queues.
When renaming queues, use the `sidekiq_queue_migrate` helper migration method,
as shown in this example:
```ruby
class MigrateTheRenamedSidekiqQueue < ActiveRecord::Migration[5.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
def up
sidekiq_queue_migrate 'old_queue_name', to: 'new_queue_name'
end
def down
sidekiq_queue_migrate 'new_queue_name', to: 'old_queue_name'
end
end
```
@@ -969,7 +969,6 @@ The following table lists variables related to security tools.
 
| **Variable** | **Description** |
| `SAST_CONFIDENCE_LEVEL` | Minimum confidence level of security issues you want to be reported; `1` for Low, `2` for Medium, `3` for High. Defaults to `3`. |
| `DS_DISABLE_REMOTE_CHECKS` | Whether remote Dependency Scanning checks are disabled. Defaults to `"false"`. Set to `"true"` to disable checks that send data to GitLab central servers. [Read more about remote checks](../../user/application_security/dependency_scanning/index.md#remote-checks). |
 
#### Disable jobs
 
@@ -63,23 +63,6 @@ The following languages and dependency managers are supported.
| Python ([poetry](https://poetry.eustace.io/)) | not currently ([issue](https://gitlab.com/gitlab-org/gitlab/issues/7006 "Support Poetry in Dependency Scanning")) | not available |
| Ruby ([gem](https://rubygems.org/)) | yes | [gemnasium](https://gitlab.com/gitlab-org/security-products/gemnasium), [bundler-audit](https://github.com/rubysec/bundler-audit) |
 
## Remote checks
While some tools pull a local database to check vulnerabilities, others, like
Gemnasium, require sending data to GitLab central servers for analysis:
1. Gemnasium scans the dependencies of your project locally and sends a list of
packages to GitLab central servers.
1. The servers return the list of known vulnerabilities for all versions of
these packages.
1. The client picks up the relevant vulnerabilities by comparing with the versions
of the packages that are used by the project.
The Gemnasium client does **NOT** send the exact package versions your project relies on.
You can disable the remote checks by [using](#customizing-the-dependency-scanning-settings)
the `DS_DISABLE_REMOTE_CHECKS` environment variable and setting it to `"true"`.
## Configuration
 
For GitLab 11.9 and later, to enable Dependency Scanning, you must
@@ -116,7 +99,7 @@ include:
template: Dependency-Scanning.gitlab-ci.yml
 
variables:
DS_DISABLE_REMOTE_CHECKS: "true"
DS_PYTHON_VERSION: 2
```
 
Because the template is [evaluated before](../../../ci/yaml/README.md#include) the pipeline
@@ -150,7 +133,6 @@ using environment variables.
| `DS_PYTHON_VERSION` | Version of Python. If set to 2, dependencies are installed using Python 2.7 instead of Python 3.6. ([Introduced](https://gitlab.com/gitlab-org/gitlab/issues/12296) in GitLab 12.1)| |
| `DS_PIP_DEPENDENCY_PATH` | Path to load Python pip dependencies from. ([Introduced](https://gitlab.com/gitlab-org/gitlab/issues/12412) in GitLab 12.2) | |
| `DS_DEFAULT_ANALYZERS` | Override the names of the official default images. Read more about [customizing analyzers](analyzers.md). | |
| `DS_DISABLE_REMOTE_CHECKS` | Do not send any data to GitLab. Used in the [Gemnasium analyzer](#remote-checks). | |
| `DS_PULL_ANALYZER_IMAGES` | Pull the images from the Docker registry (set to `0` to disable). | |
| `DS_EXCLUDED_PATHS` | Exclude vulnerabilities from output based on the paths. A comma-separated list of patterns. Patterns can be globs, file or folder paths. Parent directories will also match patterns. | `DS_EXCLUDED_PATHS=doc,spec` |
| `DS_DOCKER_CLIENT_NEGOTIATION_TIMEOUT` | Time limit for Docker client negotiation. Timeouts are parsed using Go's [`ParseDuration`](https://golang.org/pkg/time/#ParseDuration). Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `300ms`, `1.5h`, or `2h45m`. | |
@@ -148,6 +148,8 @@ excluded_attributes:
- :emails_disabled
- :max_pages_size
- :max_artifacts_size
- :marked_for_deletion_at
- :marked_for_deletion_by_user_id
namespaces:
- :runners_token
- :runners_token_encrypted
@@ -57,7 +57,7 @@ module Gitlab
private
 
# Builds a recursive CTE that gets all the groups the current user has
# access to, including any nested groups.
# access to, including any nested groups and any shared groups.
def recursive_cte
cte = Gitlab::SQL::RecursiveCTE.new(:namespaces_cte)
members = Member.arel_table
@@ -68,20 +68,27 @@ module Gitlab
.select([namespaces[:id], members[:access_level]])
.except(:order)
 
if Feature.enabled?(:share_group_with_group)
# Namespaces shared with any of the groups the user is a member of
cte << Group.select([namespaces[:id], 'group_group_links.group_access AS access_level'])
.joins(join_group_group_links)
.joins(join_members_on_group_group_links)
end
# Sub groups of any groups the user is a member of.
cte << Group.select([
namespaces[:id],
greatest(members[:access_level], cte.table[:access_level], 'access_level')
])
.joins(join_cte(cte))
.joins(join_members)
.joins(join_members_on_namespaces)
.except(:order)
 
cte
end
 
# Builds a LEFT JOIN to join optional memberships onto the CTE.
def join_members
def join_members_on_namespaces
members = Member.arel_table
namespaces = Namespace.arel_table
 
@@ -94,6 +101,23 @@ module Gitlab
Arel::Nodes::OuterJoin.new(members, Arel::Nodes::On.new(cond))
end
 
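# Builds an INNER JOIN to join group-group links onto the shared namespaces.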
def join_group_group_links
group_group_links = GroupGroupLink.arel_table
namespaces = Namespace.arel_table
cond = group_group_links[:shared_group_id].eq(namespaces[:id])
Arel::Nodes::InnerJoin.new(group_group_links, Arel::Nodes::On.new(cond))
end
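# Builds an INNER JOIN to join the current user's memberships onto the group-group links.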
def join_members_on_group_group_links
group_group_links = GroupGroupLink.arel_table
members = Member.arel_table
cond = group_group_links[:shared_with_group_id].eq(members[:source_id])
.and(members[:user_id].eq(user.id))
Arel::Nodes::InnerJoin.new(members, Arel::Nodes::On.new(cond))
end
# Builds an INNER JOIN to join namespaces onto the CTE.
def join_cte(cte)
namespaces = Namespace.arel_table
@@ -5628,9 +5628,6 @@ msgstr ""
msgid "Disable"
msgstr ""
 
msgid "Disable email notifications"
msgstr ""
msgid "Disable for this project"
msgstr ""
 
@@ -11202,9 +11199,6 @@ msgstr ""
msgid "Note: Consider asking your GitLab administrator to configure %{github_integration_link}, which will allow login via GitHub and allow importing repositories without generating a Personal Access Token."
msgstr ""
 
msgid "Note: the container registry is always visible when a project is public"
msgstr ""
msgid "NoteForm|Note"
msgstr ""
 
@@ -12869,12 +12863,18 @@ msgstr ""
msgid "ProjectSettings|All discussions must be resolved"
msgstr ""
 
msgid "ProjectSettings|Allow users to request access"
msgstr ""
msgid "ProjectSettings|Automatically resolve merge request diff discussions when they become outdated"
msgstr ""
 
msgid "ProjectSettings|Badges"
msgstr ""
 
msgid "ProjectSettings|Build, test, and deploy your changes"
msgstr ""
msgid "ProjectSettings|Choose your merge method, merge options, and merge checks."
msgstr ""
 
@@ -12884,15 +12884,30 @@ msgstr ""
msgid "ProjectSettings|Contact an admin to change this setting."
msgstr ""
 
msgid "ProjectSettings|Container registry"
msgstr ""
msgid "ProjectSettings|Customize your project badges."
msgstr ""
 
msgid "ProjectSettings|Disable email notifications"
msgstr ""
msgid "ProjectSettings|Enable 'Delete source branch' option by default"
msgstr ""
 
msgid "ProjectSettings|Every merge creates a merge commit"
msgstr ""
 
msgid "ProjectSettings|Every project can have its own space to store its Docker images"
msgstr ""
msgid "ProjectSettings|Every project can have its own space to store its packages"
msgstr ""
msgid "ProjectSettings|Everyone"
msgstr ""
msgid "ProjectSettings|Existing merge requests and protected branches are not affected"
msgstr ""
 
@@ -12908,9 +12923,24 @@ msgstr ""
msgid "ProjectSettings|Fast-forward merges only"
msgstr ""
 
msgid "ProjectSettings|Git Large File Storage"
msgstr ""
msgid "ProjectSettings|Internal"
msgstr ""
msgid "ProjectSettings|Issues"
msgstr ""
msgid "ProjectSettings|Learn more about badges."
msgstr ""
 
msgid "ProjectSettings|Lightweight issue tracking system for this project"
msgstr ""
msgid "ProjectSettings|Manages large files such as audio, video, and graphics files"
msgstr ""
msgid "ProjectSettings|Merge checks"
msgstr ""
 
@@ -12929,24 +12959,60 @@ msgstr ""
msgid "ProjectSettings|Merge pipelines will try to validate the post-merge result prior to merging"
msgstr ""
 
msgid "ProjectSettings|Merge requests"
msgstr ""
msgid "ProjectSettings|No merge commits are created"
msgstr ""
 
msgid "ProjectSettings|Note: the container registry is always visible when a project is public"
msgstr ""
msgid "ProjectSettings|Only signed commits can be pushed to this repository."
msgstr ""
 
msgid "ProjectSettings|Packages"
msgstr ""
msgid "ProjectSettings|Pages"
msgstr ""
 
msgid "ProjectSettings|Pages for project documentation"
msgstr ""
msgid "ProjectSettings|Pipelines"
msgstr ""
msgid "ProjectSettings|Pipelines must succeed"
msgstr ""
 
msgid "ProjectSettings|Pipelines need to be configured to enable this feature."
msgstr ""
 
msgid "ProjectSettings|Private"
msgstr ""
msgid "ProjectSettings|Project visibility"
msgstr ""
msgid "ProjectSettings|Public"
msgstr ""
msgid "ProjectSettings|Repository"
msgstr ""
msgid "ProjectSettings|Share code pastes with others out of Git repository"
msgstr ""
msgid "ProjectSettings|Show link to create/view merge request when pushing from the command line"
msgstr ""
 
msgid "ProjectSettings|Snippets"
msgstr ""
msgid "ProjectSettings|Submit changes to be merged upstream"
msgstr ""
msgid "ProjectSettings|These checks must pass before merge requests can be merged"
msgstr ""
 
@@ -12959,15 +13025,27 @@ msgstr ""
msgid "ProjectSettings|This setting will be applied to all projects unless overridden by an admin."
msgstr ""
 
msgid "ProjectSettings|This setting will override user notification preferences for all project members."
msgstr ""
msgid "ProjectSettings|This will dictate the commit history when you merge a merge request"
msgstr ""
 
msgid "ProjectSettings|Users can only push commits to this repository that were committed with one of their own verified emails."
msgstr ""
 
msgid "ProjectSettings|View and edit files in this project"
msgstr ""
msgid "ProjectSettings|When conflicts arise the user is given the option to rebase"
msgstr ""
 
msgid "ProjectSettings|Wiki"
msgstr ""
msgid "ProjectSettings|With GitLab Pages you can host your static websites on GitLab"
msgstr ""
msgid "ProjectTemplates|.NET Core"
msgstr ""
 
@@ -16437,6 +16515,9 @@ msgstr ""
msgid "The group and its projects can only be viewed by members."
msgstr ""
 
msgid "The group has already been shared with this group"
msgstr ""
msgid "The group settings for %{group_links} require you to enable Two-Factor Authentication for your account. You can %{leave_group_links}."
msgstr ""
 
@@ -17031,9 +17112,6 @@ msgstr ""
msgid "This setting can be overridden in each project."
msgstr ""
 
msgid "This setting will override user notification preferences for all project members."
msgstr ""
msgid "This setting will update the hostname that is used to generate private commit emails. %{learn_more}"
msgstr ""
 
@@ -18929,9 +19007,6 @@ msgstr ""
msgid "Will deploy to"
msgstr ""
 
msgid "With GitLab Pages you can host your static websites on GitLab"
msgstr ""
msgid "With contribution analytics you can have an overview for the activity of issues, merge requests and push events of your organization and its members."
msgstr ""
 
@@ -195,9 +195,22 @@ function download_chart() {
helm dependency build .
}
 
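# Succeeds when the current branch modifies scripts/review_apps/base-config.yaml relative to origin/master.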
function base_config_changed() {
git fetch origin master --depth=50
[ -n "$(git diff origin/master... --name-only -- scripts/review_apps/base-config.yaml)" ]
}
function deploy() {
local name="$CI_ENVIRONMENT_SLUG"
local edition="${GITLAB_EDITION-ce}"
local base_config_file_ref="master"
echo "REVIEW_APP_CONFIG_CHANGED: ${REVIEW_APP_CONFIG_CHANGED}"
if [ -n "${REVIEW_APP_CONFIG_CHANGED}" ]; then
base_config_file_ref="$CI_COMMIT_SHA"
fi
local base_config_file="https://gitlab.com/gitlab-org/gitlab/raw/${base_config_file_ref}/scripts/review_apps/base-config.yaml"
echoinfo "Deploying ${name}..." true
 
IMAGE_REPOSITORY="registry.gitlab.com/gitlab-org/build/cng-mirror"
@@ -240,11 +253,11 @@ EOF
)
 
HELM_CMD=$(cat << EOF
$HELM_CMD \
${HELM_CMD} \
--namespace="$KUBE_NAMESPACE" \
--version="$CI_PIPELINE_ID-$CI_JOB_ID" \
-f "../scripts/review_apps/base-config.yaml" \
"$name" .
--version="${CI_PIPELINE_ID}-${CI_JOB_ID}" \
-f "${base_config_file}" \
"${name}" .
EOF
)
 
# frozen_string_literal: true
require 'spec_helper'
describe Groups::GroupLinksController do
let(:shared_with_group) { create(:group, :private) }
let(:shared_group) { create(:group, :private) }
let(:user) { create(:user) }
before do
sign_in(user)
end
describe '#create' do
let(:shared_with_group_id) { shared_with_group.id }
subject do
post(:create,
params: { group_id: shared_group,
shared_with_group_id: shared_with_group_id,
shared_group_access: GroupGroupLink.default_access })
end
context 'when user has correct access to both groups' do
let(:group_member) { create(:user) }
before do
shared_with_group.add_developer(user)
shared_group.add_owner(user)
shared_with_group.add_developer(group_member)
end
it 'links group with selected group' do
expect { subject }.to change { shared_with_group.shared_groups.include?(shared_group) }.from(false).to(true)
end
it 'redirects to group links page' do
subject
expect(response).to(redirect_to(group_group_members_path(shared_group)))
end
it 'allows access for group member' do
expect { subject }.to change { group_member.can?(:read_group, shared_group) }.from(false).to(true)
end
context 'when shared with group id is not present' do
let(:shared_with_group_id) { nil }
it 'redirects to group links page' do
subject
expect(response).to(redirect_to(group_group_members_path(shared_group)))
expect(flash[:alert]).to eq('Please select a group.')
end
end
context 'when link is not persisted in the database' do
before do
allow(::Groups::GroupLinks::CreateService).to(
receive_message_chain(:new, :execute)
.and_return({ status: :error,
http_status: 409,
message: 'error' }))
end
it 'redirects to group links page' do
subject
expect(response).to(redirect_to(group_group_members_path(shared_group)))
expect(flash[:alert]).to eq('error')
end
end
context 'when feature flag is disabled' do
before do
stub_feature_flags(share_group_with_group: false)
end
it 'renders 404' do
subject
expect(response).to have_gitlab_http_status(404)
end
end
end
context 'when user does not have access to the group' do
before do
shared_group.add_owner(user)
end
it 'renders 404' do
subject
expect(response).to have_gitlab_http_status(404)
end
end
context 'when user does not have admin access to the shared group' do
before do
shared_with_group.add_developer(user)
shared_group.add_developer(user)
end
it 'renders 404' do
subject
expect(response).to have_gitlab_http_status(404)
end
end
end
end
# frozen_string_literal: true
FactoryBot.define do
factory :group_group_link do
shared_group { create(:group) }
shared_with_group { create(:group) }
group_access { GroupMember::DEVELOPER }
end
end
import Anomaly from '~/monitoring/components/charts/anomaly.vue';
import { shallowMount } from '@vue/test-utils';
import { colorValues } from '~/monitoring/constants';
import {
anomalyDeploymentData,
mockProjectDir,
anomalyMockGraphData,
anomalyMockResultValues,
} from '../../mock_data';
import { TEST_HOST } from 'helpers/test_constants';
import MonitorTimeSeriesChart from '~/monitoring/components/charts/time_series.vue';
const mockWidgets = 'mockWidgets';
const mockProjectPath = `${TEST_HOST}${mockProjectDir}`;
jest.mock('~/lib/utils/icon_utils'); // mock getSvgIconPathContent
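// Builds graphData for the anomaly chart by substituting the named dataset's values into the mock template's queries.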
const makeAnomalyGraphData = (datasetName, template = anomalyMockGraphData) => {
const queries = anomalyMockResultValues[datasetName].map((values, index) => ({
...template.queries[index],
result: [
{
metrics: {},
values,
},
],
}));
return { ...template, queries };
};
describe('Anomaly chart component', () => {
let wrapper;
const setupAnomalyChart = props => {
wrapper = shallowMount(Anomaly, {
propsData: { ...props },
slots: {
default: mockWidgets,
},
sync: false,
});
};
const findTimeSeries = () => wrapper.find(MonitorTimeSeriesChart);
const getTimeSeriesProps = () => findTimeSeries().props();
describe('wrapped monitor-time-series-chart component', () => {
const dataSetName = 'noAnomaly';
const dataSet = anomalyMockResultValues[dataSetName];
const inputThresholds = ['some threshold'];
beforeEach(() => {
setupAnomalyChart({
graphData: makeAnomalyGraphData(dataSetName),
deploymentData: anomalyDeploymentData,
thresholds: inputThresholds,
projectPath: mockProjectPath,
});
});
it('is a Vue instance', () => {
expect(findTimeSeries().exists()).toBe(true);
expect(findTimeSeries().isVueInstance()).toBe(true);
});
describe('receives props correctly', () => {
describe('graph-data', () => {
it('receives a single "metric" series', () => {
const { graphData } = getTimeSeriesProps();
expect(graphData.queries.length).toBe(1);
});
it('receives "metric" with all data', () => {
const { graphData } = getTimeSeriesProps();
const query = graphData.queries[0];
const expectedQuery = makeAnomalyGraphData(dataSetName).queries[0];
expect(query).toEqual(expectedQuery);
});
it('receives the "metric" results', () => {
const { graphData } = getTimeSeriesProps();
const { result } = graphData.queries[0];
const { values } = result[0];
const [metricDataset] = dataSet;
expect(values).toEqual(expect.any(Array));
values.forEach(([, y], index) => {
expect(y).toBeCloseTo(metricDataset[index][1]);
});
});
});
describe('option', () => {
let option;
let series;
beforeEach(() => {
({ option } = getTimeSeriesProps());
({ series } = option);
});
it('contains a boundary band', () => {
expect(series).toEqual(expect.any(Array));
expect(series.length).toEqual(2); // 1 upper + 1 lower boundaries
expect(series[0].stack).toEqual(series[1].stack);
series.forEach(s => {
expect(s.type).toBe('line');
expect(s.lineStyle.width).toBe(0);
expect(s.lineStyle.color).toMatch(/rgba\(.+\)/);
expect(s.lineStyle.color).toMatch(s.color);
expect(s.symbol).toEqual('none');
});
});
it('upper boundary values are stacked on top of lower boundary', () => {
const [lowerSeries, upperSeries] = series;
const [, upperDataset, lowerDataset] = dataSet;
lowerSeries.data.forEach(([, y], i) => {
expect(y).toBeCloseTo(lowerDataset[i][1]);
});
upperSeries.data.forEach(([, y], i) => {
expect(y).toBeCloseTo(upperDataset[i][1] - lowerDataset[i][1]);
});
});
});
describe('series-config', () => {
let seriesConfig;
beforeEach(() => {
({ seriesConfig } = getTimeSeriesProps());
});
it('display symbols is enabled', () => {
expect(seriesConfig).toEqual(
expect.objectContaining({
type: 'line',
symbol: 'circle',
showSymbol: true,
symbolSize: expect.any(Function),
itemStyle: {
color: expect.any(Function),
},
}),
);
});
it('does not display anomalies', () => {
const { symbolSize, itemStyle } = seriesConfig;
const [metricDataset] = dataSet;
metricDataset.forEach((v, dataIndex) => {
const size = symbolSize(null, { dataIndex });
const color = itemStyle.color({ dataIndex });
// normal color and small size
expect(size).toBeCloseTo(0);
expect(color).toBe(colorValues.primaryColor);
});
});
it('can format y values (to use in tooltips)', () => {
expect(parseFloat(wrapper.vm.yValueFormatted(0, 0))).toEqual(dataSet[0][0][1]);
expect(parseFloat(wrapper.vm.yValueFormatted(1, 0))).toEqual(dataSet[1][0][1]);
expect(parseFloat(wrapper.vm.yValueFormatted(2, 0))).toEqual(dataSet[2][0][1]);
});
});
describe('inherited properties', () => {
it('"deployment-data" keeps the same value', () => {
const { deploymentData } = getTimeSeriesProps();
expect(deploymentData).toEqual(anomalyDeploymentData);
});
it('"thresholds" keeps the same value', () => {
const { thresholds } = getTimeSeriesProps();
expect(thresholds).toEqual(inputThresholds);
});
it('"projectPath" keeps the same value', () => {
const { projectPath } = getTimeSeriesProps();
expect(projectPath).toEqual(mockProjectPath);
});
});
});
});
describe('with no boundary data', () => {
const dataSetName = 'noBoundary';
const dataSet = anomalyMockResultValues[dataSetName];
beforeEach(() => {
setupAnomalyChart({
graphData: makeAnomalyGraphData(dataSetName),
deploymentData: anomalyDeploymentData,
});
});
describe('option', () => {
let option;
let series;
beforeEach(() => {
({ option } = getTimeSeriesProps());
({ series } = option);
});
it('does not display a boundary band', () => {
expect(series).toEqual(expect.any(Array));
expect(series.length).toEqual(0); // no boundaries
});
it('can format y values (to use in tooltips)', () => {
expect(parseFloat(wrapper.vm.yValueFormatted(0, 0))).toEqual(dataSet[0][0][1]);
expect(wrapper.vm.yValueFormatted(1, 0)).toBe(''); // missing boundary
expect(wrapper.vm.yValueFormatted(2, 0)).toBe(''); // missing boundary
});
});
});
describe('with one anomaly', () => {
const dataSetName = 'oneAnomaly';
const dataSet = anomalyMockResultValues[dataSetName];
beforeEach(() => {
setupAnomalyChart({
graphData: makeAnomalyGraphData(dataSetName),
deploymentData: anomalyDeploymentData,
});
});
describe('series-config', () => {
it('displays one anomaly', () => {
const { seriesConfig } = getTimeSeriesProps();
const { symbolSize, itemStyle } = seriesConfig;
const [metricDataset] = dataSet;
const bigDots = metricDataset.filter((v, dataIndex) => {
const size = symbolSize(null, { dataIndex });
return size > 0.1;
});
const redDots = metricDataset.filter((v, dataIndex) => {
const color = itemStyle.color({ dataIndex });
return color === colorValues.anomalySymbol;
});
expect(bigDots.length).toBe(1);
expect(redDots.length).toBe(1);
});
});
});
describe('with offset', () => {
const dataSetName = 'negativeBoundary';
const dataSet = anomalyMockResultValues[dataSetName];
const expectedOffset = 4; // Lowest point in mock data is -3.7; it gets rounded up
beforeEach(() => {
setupAnomalyChart({
graphData: makeAnomalyGraphData(dataSetName),
deploymentData: anomalyDeploymentData,
});
});
describe('receives props correctly', () => {
describe('graph-data', () => {
it('receives a single "metric" series', () => {
const { graphData } = getTimeSeriesProps();
expect(graphData.queries.length).toBe(1);
});
it('receives "metric" results and applies the offset to them', () => {
const { graphData } = getTimeSeriesProps();
const { result } = graphData.queries[0];
const { values } = result[0];
const [metricDataset] = dataSet;
expect(values).toEqual(expect.any(Array));
values.forEach(([, y], index) => {
expect(y).toBeCloseTo(metricDataset[index][1] + expectedOffset);
});
});
});
});
describe('option', () => {
it('upper boundary values are stacked on top of lower boundary, plus the offset', () => {
const { option } = getTimeSeriesProps();
const { series } = option;
const [lowerSeries, upperSeries] = series;
const [, upperDataset, lowerDataset] = dataSet;
lowerSeries.data.forEach(([, y], i) => {
expect(y).toBeCloseTo(lowerDataset[i][1] + expectedOffset);
});
upperSeries.data.forEach(([, y], i) => {
expect(y).toBeCloseTo(upperDataset[i][1] - lowerDataset[i][1]);
});
});
});
});
});
export const mockProjectDir = '/frontend-fixtures/environments-project';
export const anomalyDeploymentData = [
{
id: 111,
iid: 3,
sha: 'f5bcd1d9dac6fa4137e2510b9ccd134ef2e84187',
ref: {
name: 'master',
},
created_at: '2019-08-19T22:00:00.000Z',
deployed_at: '2019-08-19T22:01:00.000Z',
tag: false,
'last?': true,
},
{
id: 110,
iid: 2,
sha: 'f5bcd1d9dac6fa4137e2510b9ccd134ef2e84187',
ref: {
name: 'master',
},
created_at: '2019-08-19T23:00:00.000Z',
deployed_at: '2019-08-19T23:00:00.000Z',
tag: false,
'last?': false,
},
];
export const anomalyMockResultValues = {
noAnomaly: [
[
['2019-08-19T19:00:00.000Z', 1.25],
['2019-08-19T20:00:00.000Z', 1.45],
['2019-08-19T21:00:00.000Z', 1.55],
['2019-08-19T22:00:00.000Z', 1.48],
],
[
// upper boundary
['2019-08-19T19:00:00.000Z', 2],
['2019-08-19T20:00:00.000Z', 2.55],
['2019-08-19T21:00:00.000Z', 2.65],
['2019-08-19T22:00:00.000Z', 3.0],
],
[
// lower boundary
['2019-08-19T19:00:00.000Z', 0.45],
['2019-08-19T20:00:00.000Z', 0.65],
['2019-08-19T21:00:00.000Z', 0.7],
['2019-08-19T22:00:00.000Z', 0.8],
],
],
noBoundary: [
[
['2019-08-19T19:00:00.000Z', 1.25],
['2019-08-19T20:00:00.000Z', 1.45],
['2019-08-19T21:00:00.000Z', 1.55],
['2019-08-19T22:00:00.000Z', 1.48],
],
[
// empty upper boundary
],
[
// empty lower boundary
],
],
oneAnomaly: [
[
['2019-08-19T19:00:00.000Z', 1.25],
['2019-08-19T20:00:00.000Z', 3.45], // anomaly
['2019-08-19T21:00:00.000Z', 1.55],
],
[
// upper boundary
['2019-08-19T19:00:00.000Z', 2],
['2019-08-19T20:00:00.000Z', 2.55],
['2019-08-19T21:00:00.000Z', 2.65],
],
[
// lower boundary
['2019-08-19T19:00:00.000Z', 0.45],
['2019-08-19T20:00:00.000Z', 0.65],
['2019-08-19T21:00:00.000Z', 0.7],
],
],
negativeBoundary: [
[
['2019-08-19T19:00:00.000Z', 1.25],
['2019-08-19T20:00:00.000Z', 3.45], // anomaly
['2019-08-19T21:00:00.000Z', 1.55],
],
[
// upper boundary
['2019-08-19T19:00:00.000Z', 2],
['2019-08-19T20:00:00.000Z', 2.55],
['2019-08-19T21:00:00.000Z', 2.65],
],
[
// lower boundary
['2019-08-19T19:00:00.000Z', -1.25],
['2019-08-19T20:00:00.000Z', -2.65],
['2019-08-19T21:00:00.000Z', -3.7], // lowest point
],
],
};
export const anomalyMockGraphData = {
title: 'Requests Per Second Mock Data',
type: 'anomaly-chart',
weight: 3,
metrics: [
// Not used
],
queries: [
{
metricId: '90',
id: 'metric',
query_range: 'MOCK_PROMETHEUS_METRIC_QUERY_RANGE',
unit: 'RPS',
label: 'Metrics RPS',
metric_id: 90,
prometheus_endpoint_path: 'MOCK_METRIC_PEP',
result: [
{
metric: {},
values: [['2019-08-19T19:00:00.000Z', 0]],
},
],
},
{
metricId: '91',
id: 'upper',
query_range: '...',
unit: 'RPS',
label: 'Upper Limit Metrics RPS',
metric_id: 91,
prometheus_endpoint_path: 'MOCK_UPPER_PEP',
result: [
{
metric: {},
values: [['2019-08-19T19:00:00.000Z', 0]],
},
],
},
{
metricId: '92',
id: 'lower',
query_range: '...',
unit: 'RPS',
label: 'Lower Limit Metrics RPS',
metric_id: 92,
prometheus_endpoint_path: 'MOCK_LOWER_PEP',
result: [
{
metric: {},
values: [['2019-08-19T19:00:00.000Z', 0]],
},
],
},
],
};
@@ -104,7 +104,7 @@ describe('Repository table row component', () => {
if (pushes) {
expect(visitUrl).not.toHaveBeenCalled();
} else {
expect(visitUrl).toHaveBeenCalledWith('https://test.com');
expect(visitUrl).toHaveBeenCalledWith('https://test.com', undefined);
}
});
 