Commit ca055120 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 6b833f1e
Showing changed files with 447 additions and 363 deletions
@@ -82,9 +82,9 @@ GitLab is a Ruby on Rails application that runs on the following software:
- Ruby (MRI) 2.6.5
- Git 2.8.4+
- Redis 2.8+
- PostgreSQL (preferred) or MySQL
- PostgreSQL 9.6+
 
For more information please see the [architecture documentation](https://docs.gitlab.com/ce/development/architecture.html).
For more information please see the [architecture](https://docs.gitlab.com/ee/development/architecture.html) and [requirements](https://docs.gitlab.com/ee/install/requirements.html) documentation.
 
## UX design
 
<script>
import { GlButton, GlModal, GlModalDirective, GlLink, GlSprintf } from '@gitlab/ui';
import ModalCopyButton from '~/vue_shared/components/modal_copy_button.vue';
import { s__ } from '~/locale';
export default {
components: {
GlButton,
GlLink,
GlModal,
GlSprintf,
ModalCopyButton,
},
directives: {
'gl-modal': GlModalDirective,
},
instructionText: {
step1: s__(
'EnableReviewApp|%{stepStart}Step 1%{stepEnd}. Ensure you have Kubernetes set up and have a base domain for your %{linkStart}cluster%{linkEnd}.',
),
step2: s__('EnableReviewApp|%{stepStart}Step 2%{stepEnd}. Copy the following snippet:'),
step3: s__(
`EnableReviewApp|%{stepStart}Step 3%{stepEnd}. Add it to the project %{linkStart}gitlab-ci.yml%{linkEnd} file.`,
),
},
modalInfo: {
closeText: s__('EnableReviewApp|Close'),
copyToClipboardText: s__('EnableReviewApp|Copy snippet text'),
copyString: `deploy_review
stage: deploy
script:
- echo "Deploy a review app"
environment:
name: review/$CI_COMMIT_REF_NAME
url: https://$CI_ENVIRONMENT_SLUG.example.com
only: branches
except: master`,
id: 'enable-review-app-info',
title: s__('ReviewApp|Enable Review App'),
},
};
</script>
<template>
<div>
<gl-button
v-gl-modal="$options.modalInfo.id"
variant="info"
category="secondary"
type="button"
class="js-enable-review-app-button"
>
{{ s__('Environments|Enable review app') }}
</gl-button>
<gl-modal
:modal-id="$options.modalInfo.id"
:title="$options.modalInfo.title"
size="lg"
class="text-2 ws-normal"
ok-only
ok-variant="light"
:ok-title="$options.modalInfo.closeText"
>
<p>
<gl-sprintf :message="$options.instructionText.step1">
<template #step="{ content }">
<strong>{{ content }}</strong>
</template>
<template #link="{ content }">
<gl-link
href="https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html"
target="_blank"
>{{ content }}</gl-link
>
</template>
</gl-sprintf>
</p>
<div>
<p>
<gl-sprintf :message="$options.instructionText.step2">
<template #step="{ content }">
<strong>{{ content }}</strong>
</template>
</gl-sprintf>
</p>
<div class="flex align-items-start">
<pre class="w-100"> {{ $options.modalInfo.copyString }} </pre>
<modal-copy-button
:title="$options.modalInfo.copyToClipboardText"
:text="$options.modalInfo.copyString"
:modal-id="$options.modalInfo.id"
css-classes="border-0"
/>
</div>
</div>
<p>
<gl-sprintf :message="$options.instructionText.step3">
<template #step="{ content }">
<strong>{{ content }}</strong>
</template>
<template #link="{ content }">
<gl-link href="blob/master/.gitlab-ci.yml" target="_blank">{{ content }}</gl-link>
</template>
</gl-sprintf>
</p>
</gl-modal>
</div>
</template>
<script>
import { GlButton } from '@gitlab/ui';
import envrionmentsAppMixin from 'ee_else_ce/environments/mixins/environments_app_mixin';
import Flash from '../../flash';
import { s__ } from '../../locale';
import Flash from '~/flash';
import { s__ } from '~/locale';
import emptyState from './empty_state.vue';
import eventHub from '../event_hub';
import environmentsMixin from '../mixins/environments_mixin';
import CIPaginationMixin from '../../vue_shared/mixins/ci_pagination_api_mixin';
import CIPaginationMixin from '~/vue_shared/mixins/ci_pagination_api_mixin';
import EnableReviewAppButton from './enable_review_app_button.vue';
import StopEnvironmentModal from './stop_environment_modal.vue';
import ConfirmRollbackModal from './confirm_rollback_modal.vue';
 
export default {
components: {
ConfirmRollbackModal,
emptyState,
EnableReviewAppButton,
GlButton,
StopEnvironmentModal,
ConfirmRollbackModal,
},
 
mixins: [CIPaginationMixin, environmentsMixin, envrionmentsAppMixin],
@@ -96,10 +100,16 @@ export default {
<div class="top-area">
<tabs :tabs="tabs" scope="environments" @onChangeTab="onChangeTab" />
 
<div v-if="canCreateEnvironment && !isLoading" class="nav-controls">
<a :href="newEnvironmentPath" class="btn btn-success">
<div class="nav-controls">
<enable-review-app-button v-if="state.reviewAppDetails.can_setup_review_app" class="mr-2" />
<gl-button
v-if="canCreateEnvironment && !isLoading"
:href="newEnvironmentPath"
category="primary"
variant="success"
>
{{ s__('Environments|New environment') }}
</a>
</gl-button>
</div>
</div>
 
@@ -52,6 +52,7 @@ export default {
this.store.storeAvailableCount(resp.data.available_count);
this.store.storeStoppedCount(resp.data.stopped_count);
this.store.storeEnvironments(resp.data.environments);
this.store.setReviewAppDetails(resp.data.review_app);
this.store.setPagination(resp.headers);
}
},
@@ -14,6 +14,7 @@ export default class EnvironmentsStore {
this.state.stoppedCounter = 0;
this.state.availableCounter = 0;
this.state.paginationInformation = {};
this.state.reviewAppDetails = {};
 
return this;
}
@@ -104,6 +105,11 @@ export default class EnvironmentsStore {
return paginationInformation;
}
 
setReviewAppDetails(details = {}) {
this.state.reviewAppDetails = details;
return details;
}
/**
* Stores the number of available environments.
*
@@ -37,7 +37,6 @@ class ApplicationController < ActionController::Base
around_action :set_current_context
around_action :set_locale
around_action :set_session_storage
around_action :set_current_admin
 
after_action :set_page_title_header, if: :json_request?
after_action :limit_session_time, if: -> { !current_user }
@@ -474,13 +473,6 @@ class ApplicationController < ActionController::Base
response.headers['Page-Title'] = URI.escape(page_title('GitLab'))
end
 
def set_current_admin(&block)
return yield unless Feature.enabled?(:user_mode_in_session)
return yield unless current_user
Gitlab::Auth::CurrentUserMode.with_current_admin(current_user, &block)
end
def html_request?
request.format.html?
end
---
title: Create conditional Enable Review App button
merge_request: 23703
author:
type: added
---
title: Add cycle analytics duration chart with median line
merge_request: 23971
author:
type: added
---
title: Admin mode support in sidekiq jobs
merge_request: 21792
author: Diego Louzán
type: changed
@@ -347,6 +347,12 @@ reviewee.
of the contributed code. It's usually a good idea to ask another maintainer or
reviewer before doing it, but have the courage to do it when you believe it is
important.
- In the interest of [Iteration](https://about.gitlab.com/handbook/values/#iteration),
if, as a reviewer, your suggestions are non-blocking changes or personal preference
(not a documented or agreed requirement), consider approving the merge request
before passing it back to the author. This allows them to implement your suggestions
if they agree, or to pass it on to the maintainer for review straight away.
This can help reduce our overall time-to-merge.
- There is a difference in doing things right and doing things right now.
Ideally, we should do the former, but in the real world we need the latter as
well. A good example is a security fix which should be released as soon as
@@ -379,10 +379,6 @@ Rails migration example:
 
```ruby
add_column_with_default(:projects, :foo, :integer, default: 10, limit: 8)
# or
add_column(:projects, :foo, :integer, default: 10, limit: 8)
```
 
## Timestamp column type
# `ReactiveCaching`
> This doc refers to <https://gitlab.com/gitlab-org/gitlab/blob/master/app/models/concerns/reactive_caching.rb>.
The `ReactiveCaching` concern is used for fetching some data in the background and storing it
in the Rails cache, keeping it up to date for as long as it is being requested. If the
data hasn't been requested for `reactive_cache_lifetime`, it stops being refreshed
and is then removed.
## Examples
```ruby
class Foo < ApplicationRecord
include ReactiveCaching
after_save :clear_reactive_cache!
def calculate_reactive_cache(param1, param2)
# Expensive operation here. The return value of this method is cached
end
def result
# Any arguments can be passed to `with_reactive_cache`. `calculate_reactive_cache`
# will be called with the same arguments.
with_reactive_cache(param1, param2) do |data|
# ...
end
end
end
```
In this example, the first time `#result` is called, it will return `nil`. However,
it will enqueue a background worker to call `#calculate_reactive_cache` and set an
initial cache lifetime of 10 minutes.
## How it works
The first time `#with_reactive_cache` is called, a background job is enqueued and
`with_reactive_cache` returns `nil`. The background job calls `#calculate_reactive_cache`
and stores its return value. It also re-enqueues the background job to run again after
`reactive_cache_refresh_interval`. Therefore, it will keep the stored value up to date.
Calculations never run concurrently.
Calling `#with_reactive_cache` while a value is cached will call the block given to
`#with_reactive_cache`, yielding the cached value. It will also extend the lifetime
of the cache by the `reactive_cache_lifetime` value.
Once the lifetime has expired, no more background jobs will be enqueued and calling
`#with_reactive_cache` will again return `nil` - starting the process all over again.
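As a rough sketch of what a caller observes (reusing the `Foo` model from the example above; the record ID is illustrative):
```ruby
foo = Foo.find(1)

# First call: nothing is cached yet, so a background job is enqueued and
# `nil` is returned without the block being run.
foo.result # => nil

# A later call, after the background job has stored a value: the block is
# called with the cached data and the cache lifetime is extended again.
foo.result # => the block's return value, computed from the cached data
```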
## When to use
- If we need to make a request to an external API (for example, requests to the Kubernetes API).
It is not advisable to keep the application server worker blocked for the duration of
the external request.
- If a model needs to perform a lot of database calls or other time-consuming
calculations.
## How to use
### In models and services
The `ReactiveCaching` concern can be used in models as well as `project_services`
(`app/models/project_services`).
1. Include the concern in your model or service.
When including in a model:
```ruby
include ReactiveCaching
```
or when including in a `project_service`:
```ruby
include ReactiveService
```
1. Implement the `calculate_reactive_cache` method in your model/service.
1. Call `with_reactive_cache` in your model/service where the cached value is needed (see the sketch after these steps).
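For illustration, a hypothetical `project_service` following these steps might look like the sketch below (the class name, `client` helper, and cached payload are invented for this example, not taken from the codebase):
```ruby
# app/models/project_services/example_monitoring_service.rb (hypothetical)
class ExampleMonitoringService < Service
  include ReactiveService

  # Called by the background worker; its return value is cached.
  def calculate_reactive_cache
    { active_alerts: client.alerts.count } # `client` is an illustrative API client
  end

  def active_alerts
    # Returns nil until the background worker has populated the cache.
    with_reactive_cache do |data|
      data[:active_alerts]
    end
  end
end
```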
### In controllers
Controller endpoints that call a model or service method that uses `ReactiveCaching` should
not wait until the background worker completes.
- An API that calls a model or service method that uses `ReactiveCaching` should return
`202 Accepted` when the cache is being calculated (when `#with_reactive_cache` returns `nil`), as in the sketch after this list.
- It should also
[set the polling interval header](fe_guide/performance.md#realtime-components) with
`Gitlab::PollingInterval.set_header`.
- The consumer of the API is expected to poll the API.
- You can also consider implementing [ETag caching](polling.md) to reduce the server
load caused by polling.
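Putting these guidelines together, a controller action might look roughly like the following sketch (the controller, model, and method names are illustrative; `Gitlab::PollingInterval.set_header` is the existing helper mentioned above):
```ruby
# Hypothetical controller action, not taken from the codebase.
class Projects::ExampleCachedDataController < Projects::ApplicationController
  def show
    summary = project.example_model.cached_summary

    if summary.nil?
      # The cache is still being calculated: ask the frontend to poll again.
      Gitlab::PollingInterval.set_header(response, interval: 10_000)
      head :accepted
    else
      render json: summary
    end
  end
end
```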
### Methods to implement in a model or service
These are methods that should be implemented in the model/service that includes `ReactiveCaching`.
#### `#calculate_reactive_cache` (required)
- This method must be implemented. Its return value will be cached.
- It will be called by `ReactiveCaching` when it needs to populate the cache.
- Any arguments passed to `with_reactive_cache` will also be passed to `calculate_reactive_cache`.
#### `#reactive_cache_updated` (optional)
- This method can be implemented if needed.
- It is called by the `ReactiveCaching` concern whenever the cache is updated.
If the cache is being refreshed and the new cache value is the same as the old cache
value, this method will not be called. It is only called if a new value is stored in
the cache.
- It can be used to perform an action whenever the cache is updated, as in the sketch below.
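A minimal sketch, assuming a model that wants to react when fresh data arrives (the ETag path is illustrative):
```ruby
class Foo < ApplicationRecord
  include ReactiveCaching

  def calculate_reactive_cache(param1, param2)
    # Expensive operation here. The return value of this method is cached
  end

  # Called only when a new (different) value has been written to the cache.
  def reactive_cache_updated
    # Example side effect: mark a polled endpoint as stale so clients pick up
    # the new data. The path is illustrative only.
    Gitlab::EtagCaching::Store.new.touch("/foos/#{id}")
  end
end
```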
### Methods called by a model or service
These are methods provided by `ReactiveCaching` and should be called in
the model/service.
#### `#with_reactive_cache` (required)
- `with_reactive_cache` must be called where the result of `calculate_reactive_cache`
is required.
- A block can be given to `with_reactive_cache`. `with_reactive_cache` can also take
any number of arguments. Any arguments passed to `with_reactive_cache` will be
passed to `calculate_reactive_cache`. The arguments passed to `with_reactive_cache`
will be appended to the cache key name.
- If `with_reactive_cache` is called when the result has already been cached, the
block will be called, yielding the cached value and the return value of the block
will be returned by `with_reactive_cache`. It will also reset the timeout of the
cache to the `reactive_cache_lifetime` value.
- If the result has not been cached yet, `with_reactive_cache` will return `nil`.
It will also enqueue a background job, which will call `calculate_reactive_cache`
and cache the result.
- Once the background job has completed and the result is cached, the next call
to `with_reactive_cache` will pick up the cached value.
- In the example below, `data` is the cached value which is yielded to the block
given to `with_reactive_cache`.
```ruby
class Foo < ApplicationRecord
include ReactiveCaching
def calculate_reactive_cache(param1, param2)
# Expensive operation here. The return value of this method is cached
end
def result
with_reactive_cache(param1, param2) do |data|
# ...
end
end
end
```
#### `#clear_reactive_cache!` (optional)
- This method can be called when the cache needs to be expired/cleared. For example,
it can be called in an `after_save` callback in a model so that the cache is
cleared after the model is modified.
- This method should be called with the same parameters that are passed to
`with_reactive_cache` because the parameters are part of the cache key, as shown in the sketch below.
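A minimal sketch, reusing the `Foo` example and following the document's convention where `param1` and `param2` stand for whatever arguments your feature passes to `with_reactive_cache`:
```ruby
class Foo < ApplicationRecord
  include ReactiveCaching

  # Expire the cached value whenever the record changes.
  after_save :expire_cached_result

  def calculate_reactive_cache(param1, param2)
    # Expensive operation here. The return value of this method is cached
  end

  private

  # Uses the same arguments as `with_reactive_cache`, because they are part
  # of the cache key.
  def expire_cached_result
    clear_reactive_cache!(param1, param2)
  end
end
```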
#### `#without_reactive_cache` (optional)
- This is a convenience method that can be used for debugging purposes.
- This method calls `calculate_reactive_cache` in the current process instead of
in a background worker (see the sketch below).
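For example, while debugging in a Rails console you could call it directly (a sketch that assumes `without_reactive_cache` takes the same arguments and block as `with_reactive_cache`, reusing the `Foo` example):
```ruby
# Calls calculate_reactive_cache("bar1", "bar2") in the current process
# instead of in a background worker, which is handy while debugging.
Foo.find(1).without_reactive_cache("bar1", "bar2") do |data|
  pp data
end
```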
### Configurable options
There are some `class_attribute` options which can be tweaked.
#### `self.reactive_cache_key`
- The value of this attribute is the prefix to the `data` and `alive` cache key names.
The parameters passed to `with_reactive_cache` form the rest of the cache key names.
- By default, this key uses the model's name and the ID of the record.
```ruby
self.reactive_cache_key = -> (record) { [model_name.singular, record.id] }
```
- The `data` and `alive` cache keys in this case will be `"ExampleModel:1:arg1:arg2"`
and `"ExampleModel:1:arg1:arg2:alive"` respectively, where `ExampleModel` is the
name of the model, `1` is the ID of the record, `arg1` and `arg2` are parameters
passed to `with_reactive_cache`.
- If you're including this concern in a service instead, you will need to override
the default by adding the following to your service:
```ruby
self.reactive_cache_key = ->(service) { [service.class.model_name.singular, service.project_id] }
```
If your `reactive_cache_key` is exactly like the above, you can use the existing
`ReactiveService` concern instead.
#### `self.reactive_cache_lease_timeout`
- `ReactiveCaching` uses `Gitlab::ExclusiveLease` to ensure that the cache calculation
is never run concurrently by multiple workers.
- This attribute is the timeout for the `Gitlab::ExclusiveLease`.
- It defaults to 2 minutes, but can be overridden if a different timeout is required.
```ruby
self.reactive_cache_lease_timeout = 2.minutes
```
#### `self.reactive_cache_refresh_interval`
- This is the interval at which the cache is refreshed.
- It defaults to 1 minute.
```ruby
self.reactive_cache_refresh_interval = 1.minute
```
#### `self.reactive_cache_lifetime`
- This is the duration after which the cache will be cleared if there are no requests.
- The default is 10 minutes. If there are no requests for this cache value for 10 minutes,
the cache will expire.
- If the cache value is requested before it expires, the timeout of the cache will
be reset to `reactive_cache_lifetime`.
```ruby
self.reactive_cache_lifetime = 10.minutes
```
#### `self.reactive_cache_worker_finder`
- This is the method used by the background worker to find or generate the object on
which `calculate_reactive_cache` can be called.
- By default it uses the model primary key to find the object:
```ruby
self.reactive_cache_worker_finder = ->(id, *_args) do
find_by(primary_key => id)
end
```
- The default behaviour can be overridden by defining a custom `reactive_cache_worker_finder`.
```ruby
class Foo < ApplicationRecord
include ReactiveCaching
self.reactive_cache_worker_finder = ->(_id, *args) { from_cache(*args) }
def self.from_cache(var1, var2)
# This method will be called by the background worker with "bar1" and
# "bar2" as arguments.
new(var1, var2)
end
def initialize(var1, var2)
# ...
end
def calculate_reactive_cache(var1, var2)
# Expensive operation here. The return value of this method is cached
end
def result
with_reactive_cache("bar1", "bar2") do |data|
# ...
end
end
end
```
- In this example, the primary key ID will be passed to `reactive_cache_worker_finder`
along with the parameters passed to `with_reactive_cache`.
- The custom `reactive_cache_worker_finder` calls `.from_cache` with the parameters
passed to `with_reactive_cache`.
@@ -196,277 +196,4 @@ end
 
## `ReactiveCaching`
 
Read the documentation on [`ReactiveCaching`](reactive_caching.md).
@@ -172,16 +172,23 @@ For example, if 30 days worth of data has been selected (for example, 2019-12-16
median line will represent the previous 30 days worth of data (2019-11-16 to 2019-12-16)
as a metric to compare against.
 
### Enabling chart
### Disabling chart
 
By default, this chart is disabled for self-managed instances. To enable it, ask an
administrator with Rails console access to run the following:
This chart is enabled by default. If you have a self-managed instance, an
administrator can open a Rails console and disable it with the following command:
 
```ruby
Feature.enable(:cycle_analytics_scatterplot_enabled)
Feature.disable(:cycle_analytics_scatterplot_enabled)
```
 
This chart is enabled by default on GitLab.com.
### Disabling chart median line
This chart median line is enabled by default. If you have a self-managed instance, an
administrator can open a Rails console and disable it with the following command:
```ruby
Feature.disable(:cycle_analytics_scatterplot_median_enabled)
```
 
## Permissions
 
doc/user/project/merge_requests/img/approvals_premium_mr_widget.png (21.7 KiB)
doc/user/project/merge_requests/img/approvals_premium_mr_widget_v12_7.png (194 KiB)
doc/user/project/merge_requests/img/mr_approvals_by_code_owners_v12_4.png (26.3 KiB)
doc/user/project/merge_requests/img/mr_approvals_by_code_owners_v12_7.png (86.7 KiB)
@@ -74,9 +74,9 @@ To enable this merge request approval rule:
 
1. Navigate to your project's **Settings > General** and expand
**Merge request approvals**.
1. Locate **All members with Developer role or higher and code owners (if any)** and click **Edit** to choose the number of approvals required.
1. Locate **Any eligible user** and choose the number of approvals required.
 
![MR approvals by Code Owners](img/mr_approvals_by_code_owners_v12_4.png)
![MR approvals by Code Owners](img/mr_approvals_by_code_owners_v12_7.png)
 
Once set, merge requests can only be merged once approved by the
number of approvals you've set. GitLab will accept approvals from
@@ -145,7 +145,7 @@ a rule is already defined.
When an [eligible approver](#eligible-approvers) approves a merge request, it will
reduce the number of approvals left for all rules that the approver belongs to.
 
![Approvals premium merge request widget](img/approvals_premium_mr_widget.png)
![Approvals premium merge request widget](img/approvals_premium_mr_widget_v12_7.png)
 
## Adding or removing an approval
 
@@ -10,54 +10,12 @@ module Gitlab
class CurrentUserMode
NotRequestedError = Class.new(StandardError)
 
# RequestStore entries
CURRENT_REQUEST_BYPASS_SESSION_ADMIN_ID_RS_KEY = { res: :current_user_mode, data: :bypass_session_admin_id }.freeze
CURRENT_REQUEST_ADMIN_MODE_USER_RS_KEY = { res: :current_user_mode, data: :current_admin }.freeze
# SessionStore entries
SESSION_STORE_KEY = :current_user_mode
ADMIN_MODE_START_TIME_KEY = :admin_mode
ADMIN_MODE_REQUESTED_TIME_KEY = :admin_mode_requested
ADMIN_MODE_START_TIME_KEY = 'admin_mode'
ADMIN_MODE_REQUESTED_TIME_KEY = 'admin_mode_requested'
MAX_ADMIN_MODE_TIME = 6.hours
ADMIN_MODE_REQUESTED_GRACE_PERIOD = 5.minutes
 
class << self
# Admin mode activation requires storing a flag in the user session. Using this
# method when scheduling jobs in Sidekiq will bypass the session check for a
# user that was already in admin mode
def bypass_session!(admin_id)
Gitlab::SafeRequestStore[CURRENT_REQUEST_BYPASS_SESSION_ADMIN_ID_RS_KEY] = admin_id
Gitlab::AppLogger.debug("Bypassing session in admin mode for: #{admin_id}")
yield
ensure
Gitlab::SafeRequestStore.delete(CURRENT_REQUEST_BYPASS_SESSION_ADMIN_ID_RS_KEY)
end
def bypass_session_admin_id
Gitlab::SafeRequestStore[CURRENT_REQUEST_BYPASS_SESSION_ADMIN_ID_RS_KEY]
end
# Store in the current request the provided user model (only if in admin mode)
# and yield
def with_current_admin(admin)
return yield unless self.new(admin).admin_mode?
Gitlab::SafeRequestStore[CURRENT_REQUEST_ADMIN_MODE_USER_RS_KEY] = admin
Gitlab::AppLogger.debug("Admin mode active for: #{admin.username}")
yield
ensure
Gitlab::SafeRequestStore.delete(CURRENT_REQUEST_ADMIN_MODE_USER_RS_KEY)
end
def current_admin
Gitlab::SafeRequestStore[CURRENT_REQUEST_ADMIN_MODE_USER_RS_KEY]
end
end
def initialize(user)
@user = user
end
@@ -84,7 +42,7 @@ module Gitlab
 
raise NotRequestedError unless admin_mode_requested?
 
reset_request_store_cache_entries
reset_request_store
 
current_session_data[ADMIN_MODE_REQUESTED_TIME_KEY] = nil
current_session_data[ADMIN_MODE_START_TIME_KEY] = Time.now
@@ -97,7 +55,7 @@ module Gitlab
def disable_admin_mode!
return unless user&.admin?
 
reset_request_store_cache_entries
reset_request_store
 
current_session_data[ADMIN_MODE_REQUESTED_TIME_KEY] = nil
current_session_data[ADMIN_MODE_START_TIME_KEY] = nil
@@ -106,7 +64,7 @@ module Gitlab
def request_admin_mode!
return unless user&.admin?
 
reset_request_store_cache_entries
reset_request_store
 
current_session_data[ADMIN_MODE_REQUESTED_TIME_KEY] = Time.now
end
@@ -115,12 +73,10 @@ module Gitlab
 
attr_reader :user
 
# RequestStore entry to cache #admin_mode? result
def admin_mode_rs_key
@admin_mode_rs_key ||= { res: :current_user_mode, user: user.id, method: :admin_mode? }
end
 
# RequestStore entry to cache #admin_mode_requested? result
def admin_mode_requested_rs_key
@admin_mode_requested_rs_key ||= { res: :current_user_mode, user: user.id, method: :admin_mode_requested? }
end
@@ -130,7 +86,6 @@ module Gitlab
end
 
def any_session_with_admin_mode?
return true if bypass_session?
return true if current_session_data.initiated? && current_session_data[ADMIN_MODE_START_TIME_KEY].to_i > MAX_ADMIN_MODE_TIME.ago.to_i
 
all_sessions.any? do |session|
@@ -148,11 +103,7 @@ module Gitlab
current_session_data[ADMIN_MODE_REQUESTED_TIME_KEY].to_i > ADMIN_MODE_REQUESTED_GRACE_PERIOD.ago.to_i
end
 
def bypass_session?
user&.id && user.id == self.class.bypass_session_admin_id
end
def reset_request_store_cache_entries
def reset_request_store
Gitlab::SafeRequestStore.delete(admin_mode_rs_key)
Gitlab::SafeRequestStore.delete(admin_mode_requested_rs_key)
end