@@ -407,7 +407,6 @@ gem 'health_check', '~> 2.6.0'
# System information
gem 'vmstat', '~> 2.3.0'
gem 'sys-filesystem', '~> 1.1.6'
-gem 'sys-proctable', '~> 1.2'
# SSH host key support
gem 'net-ssh', '~> 5.0'
...
@@ -904,8 +904,6 @@ GEM
    httpclient (>= 2.4)
    sys-filesystem (1.1.6)
      ffi
-    sys-proctable (1.2.1)
-      ffi
    sysexits (1.2.0)
    temple (0.8.0)
    test-prof (0.2.5)
@@ -1211,7 +1209,6 @@ DEPENDENCIES
  stackprof (~> 0.2.10)
  state_machines-activerecord (~> 0.5.1)
  sys-filesystem (~> 1.1.6)
-  sys-proctable (~> 1.2)
  test-prof (~> 0.2.5)
  thin (~> 1.7.0)
  timecop (~> 0.8.0)
...
@@ -23,7 +23,16 @@ module Members
      members.each do |member|
        if member.errors.any?
-         errors << "#{member.user.username}: #{member.errors.full_messages.to_sentence}"
+         current_error =
+           # Invited users may not have an associated user
+           if member.user.present?
+             "#{member.user.username}: "
+           else
+             ""
+           end
+
+         current_error += member.errors.full_messages.to_sentence
+         errors << current_error
        else
          after_execute(member: member)
        end
...
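For illustration only, a standalone Ruby sketch of the behavior this change introduces (plain structs stand in for the GitLab models, and `join` replaces ActiveSupport's `to_sentence`): the username prefix degrades gracefully when an invited member has no associated user record.

```ruby
# Hypothetical stand-ins; not the actual GitLab Member/User classes.
Member = Struct.new(:user, :error_messages)
User   = Struct.new(:username)

def error_line_for(member)
  # Members invited by email may not have an associated user record.
  prefix = member.user ? "#{member.user.username}: " : ""
  prefix + member.error_messages.join(", ")
end

puts error_line_for(Member.new(User.new("alice"), ["has already been taken"]))
# => "alice: has already been taken"
puts error_line_for(Member.new(nil, ["has already been taken"]))
# => "has already been taken"
```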
@@ -109,12 +109,20 @@ class FileUploader < GitlabUploader
  def upload_path
    if file_storage?
      # Legacy path relative to project.full_path
-     File.join(dynamic_segment, identifier)
+     local_storage_path(identifier)
    else
-     File.join(store_dir, identifier)
+     remote_storage_path(identifier)
    end
  end

+ def local_storage_path(file_identifier)
+   File.join(dynamic_segment, file_identifier)
+ end
+
+ def remote_storage_path(file_identifier)
+   File.join(store_dir, file_identifier)
+ end
+
  def store_dirs
    {
      Store::LOCAL => File.join(base_dir, dynamic_segment),
...
@@ -6,15 +6,12 @@ class PersonalFileUploader < FileUploader
    options.storage_path
  end

- def self.base_dir(model, store = nil)
-   base_dirs(model)[store || Store::LOCAL]
- end
-
- def self.base_dirs(model)
-   {
-     Store::LOCAL => File.join(options.base_dir, model_path_segment(model)),
-     Store::REMOTE => model_path_segment(model)
-   }
+ def self.base_dir(model, _store = nil)
+   # base_dir is the path seen by the user when rendering Markdown, so
+   # it should be the same for both local and object storage. It is
+   # typically prefaced with uploads/-/system, but that prefix
+   # is omitted in the path stored on disk.
+   File.join(options.base_dir, model_path_segment(model))
  end

  def self.model_path_segment(model)
@@ -40,8 +37,61 @@ class PersonalFileUploader < FileUploader
    store_dirs[object_store]
  end

+ # A personal snippet path is stored using FileUploader#upload_path.
+ #
+ # The format for the path:
+ #
+ # Local storage: :random_hex/:filename.
+ # Object storage: personal_snippet/:id/:random_hex/:filename.
+ #
+ # upload_paths represent the possible paths for a given identifier,
+ # which will vary depending on whether the file is stored in local or
+ # object storage. upload_path should match an element in upload_paths.
+ #
+ # base_dir represents the path seen by the user in Markdown, and it
+ # should always be prefixed with uploads/-/system.
+ #
+ # store_dirs represent the paths that are actually used on disk. For
+ # object storage, this should omit the prefix /uploads/-/system.
+ #
+ # For example, consider the requested path /uploads/-/system/personal_snippet/172/ff4ad5c2e40b39ae57cda51577317d20/file.png.
+ #
+ # For local storage:
+ #
+ #   File on disk: /opt/gitlab/embedded/service/gitlab-rails/public/uploads/-/system/personal_snippet/172/ff4ad5c2e40b39ae57cda51577317d20/file.png
+ #
+ #   base_dir: uploads/-/system/personal_snippet/172
+ #   upload_path: ff4ad5c2e40b39ae57cda51577317d20/file.png
+ #   upload_paths: ["ff4ad5c2e40b39ae57cda51577317d20/file.png", "personal_snippet/172/ff4ad5c2e40b39ae57cda51577317d20/file.png"]
+ #   store_dirs:
+ #   => {1=>"uploads/-/system/personal_snippet/172/ff4ad5c2e40b39ae57cda51577317d20", 2=>"personal_snippet/172/ff4ad5c2e40b39ae57cda51577317d20"}
+ #
+ # For object storage:
+ #
+ #   upload_path: personal_snippet/172/ff4ad5c2e40b39ae57cda51577317d20/file.png
+ def upload_paths(identifier)
+   [
+     local_storage_path(identifier),
+     File.join(remote_storage_base_path, identifier)
+   ]
+ end
+
+ def store_dirs
+   {
+     Store::LOCAL => File.join(base_dir, dynamic_segment),
+     Store::REMOTE => remote_storage_base_path
+   }
+ end
+
  private

+ # To avoid prefacing the remote storage path with `/uploads/-/system`,
+ # we just drop that part so that the destination path will be
+ # personal_snippet/:id/:random_hex/:filename.
+ def remote_storage_base_path
+   File.join(self.class.model_path_segment(model), dynamic_segment)
+ end
+
  def secure_url
    File.join('/', base_dir, secret, file.filename)
  end
...
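To make the comment block above concrete, here is a small standalone Ruby sketch (hypothetical values, not the uploader classes) that reproduces the path layout described for a personal snippet upload:

```ruby
require 'securerandom'

# Hypothetical values standing in for dynamic_segment, model_path_segment and identifier.
random_hex    = SecureRandom.hex          # e.g. "ff4ad5c2e40b39ae57cda51577317d20"
model_segment = 'personal_snippet/172'
filename      = 'file.png'

local_upload_path  = File.join(random_hex, filename)
remote_upload_path = File.join(model_segment, random_hex, filename)
base_dir           = File.join('uploads/-/system', model_segment)

puts local_upload_path   # => "<random_hex>/file.png"
puts remote_upload_path  # => "personal_snippet/172/<random_hex>/file.png"
puts base_dir            # => "uploads/-/system/personal_snippet/172"
```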
---
title: Fix Error 500 when inviting user already present
merge_request: 28198
author:
type: fixed
---
title: Fix incorrect prefix used in new uploads for personal snippets
merge_request: 28337
author:
type: fixed
config/puma.rb.example 0 → 100644
# frozen_string_literal: true
# Load "path" as a rackup file.
#
# The default is "config.ru".
#
rackup 'config.ru'
pidfile '/home/git/gitlab/tmp/pids/puma.pid'
state_path '/home/git/gitlab/tmp/pids/puma.state'
stdout_redirect '/home/git/gitlab/log/puma.stdout.log',
'/home/git/gitlab/log/puma.stderr.log',
true
# Configure "min" to be the minimum number of threads to use to answer
# requests and "max" the maximum.
#
# The default is "0, 16".
#
threads 1, 16
# By default, workers accept all requests and queue them to pass to handlers.
# When false, workers accept the number of simultaneous requests configured.
#
# Queueing requests generally improves performance, but can cause deadlocks if
# the app is waiting on a request to itself. See https://github.com/puma/puma/issues/612
#
# When set to false, this may require a reverse proxy to handle slow clients and
# queue requests before they reach Puma. This is due to disabling HTTP keepalive.
queue_requests false
# Bind the server to "url". "tcp://", "unix://" and "ssl://" are the only
# accepted protocols.
bind 'unix:///home/git/gitlab/tmp/sockets/gitlab.socket'
workers 3
require_relative "/home/git/gitlab/lib/gitlab/cluster/lifecycle_events"
require_relative "/home/git/gitlab/lib/gitlab/cluster/puma_worker_killer_initializer"
on_restart do
# Signal application hooks that we're about to restart
Gitlab::Cluster::LifecycleEvents.do_master_restart
end
before_fork do
# Signal to the puma killer
Gitlab::Cluster::PumaWorkerKillerInitializer.start @config.options unless ENV['DISABLE_PUMA_WORKER_KILLER']
# Signal application hooks that we're about to fork
Gitlab::Cluster::LifecycleEvents.do_before_fork
end
Gitlab::Cluster::LifecycleEvents.set_puma_options @config.options
on_worker_boot do
# Signal application hooks of worker start
Gitlab::Cluster::LifecycleEvents.do_worker_start
end
# Preload the application before starting the workers; this conflicts with
# the phased restart feature. (off by default)
preload_app!
tag 'gitlab-puma-worker'
# Verifies that all workers have checked in to the master process within
# the given timeout. If not, the worker process will be restarted. Default
# value is 60 seconds.
#
worker_timeout 60
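For a quick local test of this example configuration, a hypothetical manual start as the `git` user could look like the following; in a regular source installation the `init.d` script described in the installation guide manages the Puma process instead:

```sh
cd /home/git/gitlab

# Assumes the example file was copied to config/puma.rb first.
sudo -u git -H env RAILS_ENV=production bundle exec puma -C config/puma.rb
```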
@@ -22,7 +22,12 @@ class DropProjectsCiId < ActiveRecord::Migration[5.1]
  end

  def down
-   add_column :projects, :ci_id, :integer
-   add_concurrent_index :projects, :ci_id
+   unless column_exists?(:projects, :ci_id)
+     add_column :projects, :ci_id, :integer
+   end
+
+   unless index_exists?(:projects, :ci_id)
+     add_concurrent_index :projects, :ci_id
+   end
  end
end
# GitLab Dependency Proxy administration **[PREMIUM ONLY]**
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ee/issues/7934) in [GitLab Premium](https://about.gitlab.com/pricing) 11.11.
GitLab can be used as a dependency proxy for a variety of common package managers.
This is the administration documentation. If you want to learn how to use the
dependency proxies, see the [user guide](../user/group/dependency_proxy/index.md).
## Enabling the Dependency Proxy feature
NOTE: **Note:**
The dependency proxy requires the Puma web server to be enabled.
Puma support is EXPERIMENTAL at this time.

To enable the Dependency Proxy feature:
**Omnibus GitLab installations**
1. Edit `/etc/gitlab/gitlab.rb` and add the following line:
```ruby
gitlab_rails['dependency_proxy_enabled'] = true
```
1. Save the file and [reconfigure GitLab][] for the changes to take effect.
1. Enable the [Puma web server](https://docs.gitlab.com/omnibus/settings/puma.html).
**Installations from source**
1. After the installation is complete, you will have to configure the `dependency_proxy`
section in `config/gitlab.yml`. Set to `true` to enable it:
```yaml
dependency_proxy:
enabled: true
```
1. [Restart GitLab] for the changes to take effect.
1. Enable the [Puma web server](../install/installation.md#using-puma).
## Changing the storage path
By default, the dependency proxy files are stored locally, but you can change the default
local location or even use object storage.
### Changing the local storage path
The dependency proxy files for Omnibus GitLab installations are stored under
`/var/opt/gitlab/gitlab-rails/shared/dependency_proxy/` and for source
installations under `shared/dependency_proxy/` (relative to the git home directory).
To change the local storage path:
**Omnibus GitLab installations**
1. Edit `/etc/gitlab/gitlab.rb` and add the following line:
```ruby
gitlab_rails['dependency_proxy_storage_path'] = "/mnt/dependency_proxy"
```
1. Save the file and [reconfigure GitLab][] for the changes to take effect.
**Installations from source**
1. Edit the `dependency_proxy` section in `config/gitlab.yml`:
```yaml
dependency_proxy:
enabled: true
storage_path: shared/dependency_proxy
```
1. [Restart GitLab] for the changes to take effect.
### Using object storage
Instead of relying on local storage, you can use object storage to
store the blobs of the dependency proxy:
**Omnibus GitLab installations**
1. Edit `/etc/gitlab/gitlab.rb` and add the following lines (uncomment where
necessary):
```ruby
gitlab_rails['dependency_proxy_enabled'] = true
gitlab_rails['dependency_proxy_storage_path'] = "/var/opt/gitlab/gitlab-rails/shared/dependency_proxy"
gitlab_rails['dependency_proxy_object_store_enabled'] = true
gitlab_rails['dependency_proxy_object_store_remote_directory'] = "dependency_proxy" # The bucket name.
gitlab_rails['dependency_proxy_object_store_direct_upload'] = false # Use Object Storage directly for uploads instead of background uploads if enabled (Default: false).
gitlab_rails['dependency_proxy_object_store_background_upload'] = true # Temporary option to limit automatic upload (Default: true).
gitlab_rails['dependency_proxy_object_store_proxy_download'] = false # Passthrough all downloads via GitLab instead of using Redirects to Object Storage.
gitlab_rails['dependency_proxy_object_store_connection'] = {
##
## If the provider is AWS S3, uncomment the following
##
#'provider' => 'AWS',
#'region' => 'eu-west-1',
#'aws_access_key_id' => 'AWS_ACCESS_KEY_ID',
#'aws_secret_access_key' => 'AWS_SECRET_ACCESS_KEY',
##
## If the provider is other than AWS (an S3-compatible one), uncomment the following
##
#'host' => 's3.amazonaws.com',
#'aws_signature_version' => 4 # For creation of signed URLs. Set to 2 if provider does not support v4.
#'endpoint' => 'https://s3.amazonaws.com' # Useful for S3-compliant services such as DigitalOcean Spaces.
#'path_style' => false # If true, use 'host/bucket_name/object' instead of 'bucket_name.host/object'.
}
```
1. Save the file and [reconfigure GitLab][] for the changes to take effect.
**Installations from source**
1. Edit the `dependency_proxy` section in `config/gitlab.yml` (uncomment where necessary):
```yaml
dependency_proxy:
enabled: true
##
## The location where dependency proxy files are stored (default: shared/dependency_proxy).
##
#storage_path: shared/dependency_proxy
object_store:
enabled: false
remote_directory: dependency_proxy # The bucket name.
#direct_upload: false # Use Object Storage directly for uploads instead of background uploads if enabled (Default: false).
#background_upload: true # Temporary option to limit automatic upload (Default: true).
#proxy_download: false # Passthrough all downloads via GitLab instead of using Redirects to Object Storage.
connection:
##
## If the provider is AWS S3, uncomment the following
##
#provider: AWS
#region: us-east-1
#aws_access_key_id: AWS_ACCESS_KEY_ID
#aws_secret_access_key: AWS_SECRET_ACCESS_KEY
##
## If the provider is other than AWS (an S3-compatible one), uncomment the following
##
#host: 's3.amazonaws.com' # default: s3.amazonaws.com.
#aws_signature_version: 4 # For creation of signed URLs. Set to 2 if provider does not support v4.
#endpoint: 'https://s3.amazonaws.com' # Useful for S3-compliant services such as DigitalOcean Spaces.
#path_style: false # If true, use 'host/bucket_name/object' instead of 'bucket_name.host/object'.
```
1. [Restart GitLab] for the changes to take effect.
[reconfigure gitlab]: restart_gitlab.md#omnibus-gitlab-reconfigure "How to reconfigure Omnibus GitLab"
[restart gitlab]: restart_gitlab.md#installations-from-source "How to restart GitLab"
@@ -43,11 +43,10 @@ The following metrics are available:
| redis_ping_latency_seconds | Gauge | 9.4 | Round trip time of the redis ping |
| user_session_logins_total | Counter | 9.4 | Counter of how many users have logged in |
| upload_file_does_not_exist | Counter | 10.7 in EE, 11.5 in CE | Number of times an upload record could not find its file |
| failed_login_captcha_total | Gauge | 11.0 | Counter of failed CAPTCHA attempts during login |
| successful_login_captcha_total | Gauge | 11.0 | Counter of successful CAPTCHA attempts during login |
| unicorn_active_connections | Gauge | 11.0 | The number of active Unicorn connections (workers) |
| unicorn_queued_connections | Gauge | 11.0 | The number of queued Unicorn connections |
-| unicorn_workers | Gauge | 11.11 | The number of Unicorn workers |

## Sidekiq Metrics available for Geo **[PREMIUM]**

@@ -101,10 +100,6 @@ Some basic Ruby runtime metrics are available:
| ruby_file_descriptors | Gauge | 11.1 | File descriptors per process |
| ruby_memory_bytes | Gauge | 11.1 | Memory usage by process |
| ruby_sampler_duration_seconds_total | Counter | 11.1 | Time spent collecting stats |
-| ruby_process_cpu_seconds_total | Gauge | 11.11 | Total amount of CPU time per process |
-| ruby_process_max_fds | Gauge | 11.11 | Maximum number of open file descriptors per process |
-| ruby_process_resident_memory_bytes | Gauge | 11.11 | Memory usage by process, measured in bytes |
-| ruby_process_start_time_seconds | Gauge | 11.11 | The elapsed time between system boot and the process started, measured in seconds |

[GC.stat]: https://ruby-doc.org/core-2.3.0/GC.html#method-c-stat
...
@@ -434,7 +434,8 @@ sudo -u git -H editor config/resque.yml
```

CAUTION: **Caution:**
Make sure to edit both `gitlab.yml` and `unicorn.rb` to match your setup.
+If you want to use the Puma web server, see [Using Puma](#using-puma) for the additional steps.

NOTE: **Note:**
If you want to use HTTPS, see [Using HTTPS](#using-https) for the additional steps.
@@ -875,6 +876,25 @@ You also need to change the corresponding options (e.g. `ssh_user`, `ssh_host`,
Apart from the always supported markdown style, there are other rich text files that GitLab can display. But you might have to install a dependency to do so. See the [github-markup gem README](https://github.com/gitlabhq/markup#markups) for more information.

+### Using Puma
+
+Puma is a multi-threaded HTTP 1.1 server for Ruby applications.
+
+To use GitLab with Puma:
+
+1. Finish the GitLab setup so you have it up and running.
+1. Copy the supplied example Puma config file into place:
+
+   ```sh
+   cd /home/git/gitlab
+
+   # Copy config file for the web server
+   sudo -u git -H cp config/puma.rb.example config/puma.rb
+   ```
+
+1. Edit the system `init.d` script to set the `EXPERIMENTAL_PUMA=1` flag. If you have `/etc/default/gitlab`, edit it there instead (see the sketch after these steps).
+1. Restart GitLab.
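A minimal sketch of the flag change in step 3, assuming your init script sources `/etc/default/gitlab`; adjust to however your system manages the GitLab service:

```sh
# Switch the init script from Unicorn to the experimental Puma server.
echo "EXPERIMENTAL_PUMA=1" | sudo tee -a /etc/default/gitlab

# Restart GitLab so the init script picks up the flag.
sudo service gitlab restart
```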
## Troubleshooting

### "You appear to have cloned an empty repository."
...
@@ -118,7 +118,15 @@ will be synced to your Group and you can visualize it from the
![Additional minutes](img/additional_minutes.png)

-NOTE: **Important note**: If you have some minutes used over your default quota, these minutes will
+Be aware that:
+
+1. If you have purchased extra CI minutes before the purchase of a paid plan,
+   we will calculate a pro-rated charge for your paid plan. That means you may
+   be charged for less than one year since your subscription was previously
+   created with the extra CI minutes.
+1. Once the extra CI minutes have been assigned to a Group, they cannot be transferred
+   to a different Group.
+1. If you have some minutes used over your default quota, these minutes will
be deducted from your Additional Minutes quota immediately after your purchase of additional
minutes.
...
@@ -122,12 +122,13 @@ container_scanning:
    ## https://docs.gitlab.com/ee/ci/variables/#predefined-environment-variables
    CI_APPLICATION_REPOSITORY: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
    CI_APPLICATION_TAG: $CI_COMMIT_SHA
+   CLAIR_LOCAL_SCAN_VERSION: v2.0.8_fe9b059d930314b54c78f75afe265955faf4fdc1
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - docker run -d --name db arminc/clair-db:latest
-   - docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:v2.0.6
+   - docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:${CLAIR_LOCAL_SCAN_VERSION}
    - apk add -U wget ca-certificates
    - docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
    - wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
@@ -164,12 +165,13 @@ container_scanning:
    ## https://docs.gitlab.com/ee/ci/variables/#predefined-environment-variables
    CI_APPLICATION_REPOSITORY: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
    CI_APPLICATION_TAG: $CI_COMMIT_SHA
+   CLAIR_LOCAL_SCAN_VERSION: v2.0.8_fe9b059d930314b54c78f75afe265955faf4fdc1
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - docker run -d --name db arminc/clair-db:latest
-   - docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:v2.0.6
+   - docker run -p 6060:6060 --link db:postgres -d --name clair --restart on-failure arminc/clair-local-scan:${CLAIR_LOCAL_SCAN_VERSION}
    - apk add -U wget ca-certificates
    - docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
    - wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
...
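If a project needs a different scanner release than the template default, the `CLAIR_LOCAL_SCAN_VERSION` variable introduced above can be overridden in the job. A minimal sketch; the tag shown is only illustrative and must exist as an `arminc/clair-local-scan` tag on Docker Hub:

```yaml
container_scanning:
  variables:
    # Illustrative override: pin the release the job previously hard-coded.
    CLAIR_LOCAL_SCAN_VERSION: v2.0.6
```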
doc/user/group/dependency_proxy/img/group_dependency_proxy.png
# Dependency Proxy **[PREMIUM]**
> [Introduced](https://gitlab.com/gitlab-org/gitlab-ee/issues/7934) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.11.
NOTE: **Note:**
This is the user guide. In order to use the dependency proxy, an administrator
must first [configure it](../../../administration/dependency_proxy.md).
For many organizations, it is desirable to have a local proxy for frequently used
upstream images/packages. In the case of CI/CD, the proxy is responsible for
receiving a request and returning the upstream image from a registry, acting
as a pull-through cache.
The dependency proxy is available at the group level. To access it, navigate to
a group's **Overview > Dependency Proxy**.
![Dependency Proxy group page](img/group_dependency_proxy.png)
## Supported dependency proxies
NOTE: **Note:**
For a list of the upcoming additions to the proxies, visit the
[direction page](https://about.gitlab.com/direction/package/dependency_proxy/#top-vision-items).
The following dependency proxies are supported.
| Dependency proxy | GitLab version |
| ---------------- | -------------- |
| Docker | 11.11+ |
## Using the Docker dependency proxy
With the Docker dependency proxy, you can use GitLab as a source for a Docker image.
To get a Docker image into the dependency proxy:
1. Find the proxy URL on your group's page under **Overview > Dependency Proxy**,
for example `gitlab.com/groupname/dependency_proxy/containers`.
1. Trigger GitLab to pull the Docker image you want (e.g., `alpine:latest` or
   `linuxserver/nextcloud:latest`) and store it in the proxy storage in one of
   the following ways:
- Manually pulling the Docker image:
```bash
docker pull gitlab.com/groupname/dependency_proxy/containers/alpine:latest
```
- From a `Dockerfile`:
```dockerfile
FROM gitlab.com/groupname/dependency_proxy/containers/alpine:latest
```
- In [`.gitlab-ci.yml`](../../../ci/yaml/README.md#image):
```yaml
image: gitlab.com/groupname/dependency_proxy/containers/alpine:latest
```
GitLab will then pull the Docker image from Docker Hub and will cache the blobs
on the GitLab server. The next time you pull the same image, it will get the latest
information about the image from Docker Hub but will serve the existing blobs
from GitLab.
The blobs are kept forever, and there is no hard limit on how much data can be
stored.
## Limitations
The following limitations apply:
- Only public groups are supported (authentication is not supported yet).
- Only Docker Hub is supported.
- This feature requires Docker Hub to be available.
@@ -368,5 +368,9 @@ and issues) performed by your group members.
With [GitLab Issues Analytics](issues_analytics/index.md), in groups, you can see a bar chart of the number of issues created each month.

+## Dependency Proxy **[PREMIUM]**
+
+Use GitLab as a [dependency proxy](dependency_proxy/index.md) for upstream Docker images.
+
[ee]: https://about.gitlab.com/pricing/
[ee-2534]: https://gitlab.com/gitlab-org/gitlab-ee/issues/2534
@@ -22,8 +22,16 @@ SAML SSO for groups is used only as a convenient way to add users and does not s
![Issuer and callback for configuring SAML identity provider with GitLab.com](img/group_saml_configuration_information.png)

-NOTE: **Note:**
-Partial SSO enforcement was introduced in [11.8](https://gitlab.com/gitlab-org/gitlab-ee/issues/5291). With this option enabled, users must use your group's GitLab single sign on URL to be added to the group or be added via SCIM. Users can no longer be added manually. After a user has been added to the group, GitLab does not continue to enforce the use of SSO, but we'll [add a persistent check](https://gitlab.com/gitlab-org/gitlab-ee/issues/9255) in a later version.
+### SSO enforcement
+
+SSO enforcement was:
+
+- [Introduced in GitLab 11.8](https://gitlab.com/gitlab-org/gitlab-ee/issues/5291).
+- [Improved upon in GitLab 11.11 with ongoing enforcement in the GitLab UI](https://gitlab.com/gitlab-org/gitlab-ee/issues/9255).
+
+With this option enabled, users must use your group's GitLab single sign on URL to be added to the group or be added via SCIM. Users cannot be added manually, and may only access project/group resources via the UI by signing in through the SSO URL.
+
+We intend to add a similar SSO requirement for [Git and API activity](https://gitlab.com/gitlab-org/gitlab-ee/issues/9152) in the future.

### NameID
...
@@ -23,32 +23,25 @@ module Gitlab
      end

      def init_metrics
-       metrics = {
-         file_descriptors: ::Gitlab::Metrics.gauge(with_prefix(:file, :descriptors), 'File descriptors used', labels, :livesum),
-         memory_bytes: ::Gitlab::Metrics.gauge(with_prefix(:memory, :bytes), 'Memory used', labels, :livesum),
-         process_cpu_seconds_total: ::Gitlab::Metrics.gauge(with_prefix(:process, :cpu_seconds_total), 'Process CPU seconds total'),
-         process_max_fds: ::Gitlab::Metrics.gauge(with_prefix(:process, :max_fds), 'Process max fds'),
-         process_resident_memory_bytes: ::Gitlab::Metrics.gauge(with_prefix(:process, :resident_memory_bytes), 'Memory used', labels, :livesum),
-         process_start_time_seconds: ::Gitlab::Metrics.gauge(with_prefix(:process, :start_time_seconds), 'Process start time seconds'),
-         sampler_duration: ::Gitlab::Metrics.counter(with_prefix(:sampler, :duration_seconds_total), 'Sampler time', labels),
-         total_time: ::Gitlab::Metrics.counter(with_prefix(:gc, :duration_seconds_total), 'Total GC time', labels)
-       }
+       metrics = {}
+       metrics[:sampler_duration] = ::Gitlab::Metrics.counter(with_prefix(:sampler, :duration_seconds_total), 'Sampler time', labels)
+       metrics[:total_time] = ::Gitlab::Metrics.counter(with_prefix(:gc, :duration_seconds_total), 'Total GC time', labels)

        GC.stat.keys.each do |key|
          metrics[key] = ::Gitlab::Metrics.gauge(with_prefix(:gc_stat, key), to_doc_string(key), labels, :livesum)
        end

+       metrics[:memory_usage] = ::Gitlab::Metrics.gauge(with_prefix(:memory, :bytes), 'Memory used', labels, :livesum)
+       metrics[:file_descriptors] = ::Gitlab::Metrics.gauge(with_prefix(:file, :descriptors), 'File descriptors used', labels, :livesum)
+
        metrics
      end

      def sample
        start_time = System.monotonic_time
+       metrics[:memory_usage].set(labels.merge(worker_label), System.memory_usage)
        metrics[:file_descriptors].set(labels.merge(worker_label), System.file_descriptor_count)
-       metrics[:process_cpu_seconds_total].set(labels.merge(worker_label), ::Gitlab::Metrics::System.cpu_time)
-       metrics[:process_max_fds].set(labels.merge(worker_label), ::Gitlab::Metrics::System.max_open_file_descriptors)
-       metrics[:process_start_time_seconds].set(labels.merge(worker_label), ::Gitlab::Metrics::System.process_start_time)
-       set_memory_usage_metrics
        sample_gc

        metrics[:sampler_duration].increment(labels, System.monotonic_time - start_time)
@@ -68,14 +61,6 @@ module Gitlab
        metrics[:total_time].increment(labels, GC::Profiler.total_time)
      end

-     def set_memory_usage_metrics
-       memory_usage = System.memory_usage
-       memory_labels = labels.merge(worker_label)
-
-       metrics[:memory_bytes].set(memory_labels, memory_usage)
-       metrics[:process_resident_memory_bytes].set(memory_labels, memory_usage)
-     end
-
      def worker_label
        return {} unless defined?(Unicorn::Worker)
...
@@ -8,16 +8,12 @@ module Gitlab
        super(interval)
      end

-     def metrics
-       @metrics ||= init_metrics
+     def unicorn_active_connections
+       @unicorn_active_connections ||= ::Gitlab::Metrics.gauge(:unicorn_active_connections, 'Unicorn active connections', {}, :max)
      end

-     def init_metrics
-       {
-         unicorn_active_connections: ::Gitlab::Metrics.gauge(:unicorn_active_connections, 'Unicorn active connections', {}, :max),
-         unicorn_queued_connections: ::Gitlab::Metrics.gauge(:unicorn_queued_connections, 'Unicorn queued connections', {}, :max),
-         unicorn_workers: ::Gitlab::Metrics.gauge(:unicorn_workers, 'Unicorn workers')
-       }
+     def unicorn_queued_connections
+       @unicorn_queued_connections ||= ::Gitlab::Metrics.gauge(:unicorn_queued_connections, 'Unicorn queued connections', {}, :max)
      end

      def enabled?
@@ -27,13 +23,14 @@ module Gitlab
      def sample
        Raindrops::Linux.tcp_listener_stats(tcp_listeners).each do |addr, stats|
-         set_unicorn_connection_metrics('tcp', addr, stats)
+         unicorn_active_connections.set({ socket_type: 'tcp', socket_address: addr }, stats.active)
+         unicorn_queued_connections.set({ socket_type: 'tcp', socket_address: addr }, stats.queued)
        end

        Raindrops::Linux.unix_listener_stats(unix_listeners).each do |addr, stats|
-         set_unicorn_connection_metrics('unix', addr, stats)
+         unicorn_active_connections.set({ socket_type: 'unix', socket_address: addr }, stats.active)
+         unicorn_queued_connections.set({ socket_type: 'unix', socket_address: addr }, stats.queued)
        end
-
-       metrics[:unicorn_workers].set({}, unicorn_workers_count)
      end

      private
@@ -42,13 +39,6 @@ module Gitlab
        @tcp_listeners ||= Unicorn.listener_names.grep(%r{\A[^/]+:\d+\z})
      end

-     def set_unicorn_connection_metrics(type, addr, stats)
-       labels = { socket_type: type, socket_address: addr }
-
-       metrics[:unicorn_active_connections].set(labels, stats.active)
-       metrics[:unicorn_queued_connections].set(labels, stats.queued)
-     end
-
      def unix_listeners
        @unix_listeners ||= Unicorn.listener_names - tcp_listeners
      end
@@ -56,10 +46,6 @@ module Gitlab
      def unicorn_with_listeners?
        defined?(Unicorn) && Unicorn.listener_names.any?
      end
-
-     def unicorn_workers_count
-       Sys::ProcTable.ps.select {|p| p.cmdline.match(/unicorn_rails worker.+ #{Rails.root.to_s}/)}.count
-     end
    end
  end
end
...
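For reference, a minimal standalone sketch of the Raindrops call the sampler relies on (assumes the `raindrops` gem, a Linux host, and a listener bound on the given address):

```ruby
require 'raindrops'

# Same API the sampler uses: per-listener stats with active/queued connection counts.
Raindrops::Linux.tcp_listener_stats(%w(127.0.0.1:8080)).each do |addr, stats|
  puts "#{addr}: active=#{stats.active} queued=#{stats.queued}"
end
```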