Commit fa06ef9c authored by Kamil Trzcinski

Read docker registry configuration file

parent ecc8ad8c
@@ -24,37 +24,26 @@ By default it runs in dry run mode (no changes). When run with `-delete` it will
 
 If you run `-delete -soft-delete=false` you will remove data forever.
 
-### Run (filesystem)
+### Run
 
 Dry run:
 
 ```bash
-docker-distribution-pruner -storage=filesystem -fs-root-dir=/path/to/registry/storage
+docker-distribution-pruner -config=/path/to/registry/configuration
 ```
 
 Reclaim disk space:
 
 ```bash
-docker-distribution-pruner -storage=filesystem -fs-root-dir=/path/to/registry/storage -delete
+docker-distribution-pruner -config=/path/to/registry/configuration -delete
 ```
 
-### Run (s3)
+### GitLab Omnibus
 
-Configure credentials:
-```
-aws configure
-```
-Dry run:
-```bash
-docker-distribution-pruner -storage=s3 -s3-bucket=my-bucket
-```
-Reclaim disk space:
+Run:
 
 ```bash
-docker-distribution-pruner -storage=s3 -s3-bucket=my-bucket -delete
+docker-distribution-pruner -config=/var/opt/gitlab/registry/config.yml
 ```
 
 ### S3 effectiveness
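
The `-config` flag points at the registry's own `config.yml`; only the `version` and `storage` sections are read (see the parsing code further down). A minimal filesystem-backed example, with a placeholder path:

```yaml
version: 0.1
storage:
  filesystem:
    rootdirectory: /var/lib/registry
```

An S3-backed equivalent (exactly one storage driver may be defined; all values here are placeholders, and the optional `regionendpoint` key targets S3-compatible stores):

```yaml
version: 0.1
storage:
  s3:
    accesskey: AKIAEXAMPLE
    secretkey: example-secret
    bucket: my-registry-bucket
    region: us-east-1
    rootdirectory: /
```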
@@ -99,14 +88,14 @@ It is highly not advised to change these options as it can leave left-overs in r
 
 ```
 Usage of docker-distribution-pruner:
+  -config string
+    	Path to registry config file
   -debug
     	Print debug messages
   -delete
     	Delete data, instead of dry run
   -delete-old-tag-versions
     	Delete old tag versions (default true)
-  -fs-root-dir string
-    	root directory (default "examples/registry")
   -ignore-blobs
     	Ignore blobs processing and recycling
   -jobs int
@@ -119,20 +108,12 @@ Usage of docker-distribution-pruner:
     	Number of concurrent parallel walk jobs to execute (default 10)
   -repository-csv-output string
    	File to which CSV will be written with all metrics (default "repositories.csv")
-  -s3-bucket string
-    	s3 bucket
-  -s3-region string
-    	s3 region (default "us-east-1")
-  -s3-root-dir string
-    	s3 root directory
-  -s3-storage-cache string
-    	s3 cache (default "tmp-cache")
   -soft-delete
    	When deleting, do not remove, but move to backup/ folder (default true)
   -soft-errors
    	Print errors, but do not fail
-  -storage string
-    	Storage type to use: filesystem or s3
   -verbose
    	Print verbose messages (default true)
 ```
package main

import (
	"errors"
	"io/ioutil"

	"gopkg.in/yaml.v2"
)

type distributionStorageFilesystem struct {
	RootDirectory string `yaml:"rootdirectory"`
}

type distributionStorageS3 struct {
	AccessKey      string  `yaml:"accesskey"`
	SecretKey      string  `yaml:"secretkey"`
	Bucket         string  `yaml:"bucket"`
	Region         *string `yaml:"region"`
	RegionEndpoint *string `yaml:"regionendpoint"`
	RootDirectory  string  `yaml:"rootdirectory"`
}

type distributionStorage struct {
	Filesystem *distributionStorageFilesystem `yaml:"filesystem"`
	S3         *distributionStorageS3         `yaml:"s3"`
}

type distributionConfig struct {
	Version string              `yaml:"version"`
	Storage distributionStorage `yaml:"storage"`
}

func storageFromConfig(configFile string) (storageObject, error) {
	data, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, err
	}

	config := &distributionConfig{}
	err = yaml.Unmarshal(data, config)
	if err != nil {
		return nil, err
	}

	if config.Version != "0.1" {
		return nil, errors.New("only 0.1 version is supported")
	}

	if config.Storage.Filesystem != nil && config.Storage.S3 != nil {
		return nil, errors.New("multiple storages defined")
	}

	if config.Storage.Filesystem != nil {
		return newFilesystemStorage(config.Storage.Filesystem)
	} else if config.Storage.S3 != nil {
		return newS3Storage(config.Storage.S3)
	} else {
		return nil, errors.New("unsupported storage")
	}
}
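
A quick sanity check of the failure paths above; a hypothetical table test (same package assumed, the cases and file handling are illustrative, not part of this commit):

```go
package main

import (
	"io/ioutil"
	"os"
	"testing"
)

// Exercises storageFromConfig's version check, the storage selection,
// and the happy filesystem path.
func TestStorageFromConfig(t *testing.T) {
	cases := []struct {
		name    string
		yaml    string
		wantErr bool
	}{
		{"filesystem", "version: 0.1\nstorage:\n  filesystem:\n    rootdirectory: /tmp\n", false},
		{"bad version", "version: 0.2\nstorage:\n  filesystem:\n    rootdirectory: /tmp\n", true},
		{"no storage", "version: 0.1\nstorage: {}\n", true},
	}
	for _, c := range cases {
		f, err := ioutil.TempFile("", "registry-config")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(f.Name())
		if _, err := f.WriteString(c.yaml); err != nil {
			t.Fatal(err)
		}
		f.Close()
		_, err = storageFromConfig(f.Name())
		if (err != nil) != c.wantErr {
			t.Errorf("%s: err = %v, wantErr = %v", c.name, err, c.wantErr)
		}
	}
}
```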
 package main
 
 import (
-	"flag"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -9,20 +8,15 @@ import (
 )
 
 type fsStorage struct {
-}
-
-var fsRootDir = flag.String("fs-root-dir", "examples/registry", "root directory")
-
-func newFsStorage() storageObject {
-	return &fsStorage{}
+	*distributionStorageFilesystem
 }
 
 func (f *fsStorage) fullPath(path string) string {
-	return filepath.Join(*fsRootDir, "docker", "registry", "v2", path)
+	return filepath.Join(f.RootDirectory, "docker", "registry", "v2", path)
 }
 
 func (f *fsStorage) backupPath(path string) string {
-	return filepath.Join(*fsRootDir, "docker_backup", "registry", "v2", path)
+	return filepath.Join(f.RootDirectory, "docker_backup", "registry", "v2", path)
 }
 
 func (f *fsStorage) Walk(rootDir string, baseDir string, fn walkFunc) error {
@@ -110,3 +104,7 @@ func (f *fsStorage) Move(path, newPath string) error {
 
 func (f *fsStorage) Info() {
 }
+
+func newFilesystemStorage(config *distributionStorageFilesystem) (storageObject, error) {
+	return &fsStorage{config}, nil
+}
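
The embedded `*distributionStorageFilesystem` is what makes `f.RootDirectory` resolve above; a standalone sketch of Go's field promotion (types duplicated here purely for illustration):

```go
package main

import "fmt"

type conf struct{ RootDirectory string }

// Embedding a *conf promotes its fields onto the outer struct.
type store struct{ *conf }

func main() {
	s := &store{&conf{RootDirectory: "/var/lib/registry"}}
	// s.RootDirectory is shorthand for s.conf.RootDirectory.
	fmt.Println(s.RootDirectory)
}
```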
@@ -10,9 +10,9 @@ import (
 )
 
 var (
+	config           = flag.String("config", "", "Path to registry config file")
 	debug            = flag.Bool("debug", false, "Print debug messages")
 	verbose          = flag.Bool("verbose", true, "Print verbose messages")
-	storage          = flag.String("storage", "", "Storage type to use: filesystem or s3")
 	jobs             = flag.Int("jobs", 10, "Number of concurrent jobs to execute")
 	parallelWalkJobs = flag.Int("parallel-walk-jobs", 10, "Number of concurrent parallel walk jobs to execute")
 	ignoreBlobs      = flag.Bool("ignore-blobs", false, "Ignore blobs processing and recycling")
@@ -45,17 +45,15 @@ func main() {
 
 	logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
 
-	var err error
-	switch *storage {
-	case "filesystem":
-		currentStorage = newFsStorage()
-	case "s3":
-		currentStorage = newS3Storage()
+	if *config == "" {
+		flag.Usage()
+		os.Exit(1)
+	}
 
-	default:
-		logrus.Fatalln("Unknown storage specified:", *storage)
+	var err error
+	currentStorage, err = storageFromConfig(*config)
+	if err != nil {
+		logrus.Fatalln(err)
 	}
 
 	blobs := make(blobsData)
@@ -10,6 +10,7 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 )
@@ -17,6 +18,7 @@ import (
 const listMax = 1000
 
 type s3Storage struct {
+	*distributionStorageS3
 	S3                *s3.S3
 	apiCalls          int64
 	expensiveApiCalls int64
@@ -25,28 +27,14 @@ type s3Storage struct {
 	cacheMiss         int64
 }
 
-var s3RootDir = flag.String("s3-root-dir", "", "s3 root directory")
-var s3Bucket = flag.String("s3-bucket", "", "s3 bucket")
-var s3Region = flag.String("s3-region", "us-east-1", "s3 region")
-var s3CacheStorage = flag.String("s3-storage-cache", "tmp-cache", "s3 cache")
-
-func newS3Storage() storageObject {
-	sess, err := session.NewSession()
-	if err != nil {
-		panic(err)
-	}
-
-	return &s3Storage{
-		S3: s3.New(sess, aws.NewConfig().WithRegion(*s3Region)),
-	}
-}
-
 func (f *s3Storage) fullPath(path string) string {
-	return filepath.Join(*s3RootDir, "docker", "registry", "v2", path)
+	return filepath.Join(f.RootDirectory, "docker", "registry", "v2", path)
 }
 
 func (f *s3Storage) backupPath(path string) string {
-	return filepath.Join(*s3RootDir, "docker-backup", "registry", "v2", path)
+	return filepath.Join(f.RootDirectory, "docker-backup", "registry", "v2", path)
 }
 
 func (f *s3Storage) Walk(path string, baseDir string, fn walkFunc) error {
@@ -62,7 +50,7 @@ func (f *s3Storage) Walk(path string, baseDir string, fn walkFunc) error {
 
 	atomic.AddInt64(&f.apiCalls, 1)
 	resp, err := f.S3.ListObjects(&s3.ListObjectsInput{
-		Bucket:  s3Bucket,
+		Bucket:  aws.String(f.Bucket),
 		Prefix:  aws.String(path),
 		MaxKeys: aws.Int64(listMax),
 	})
@@ -104,7 +92,7 @@ func (f *s3Storage) Walk(path string, baseDir string, fn walkFunc) error {
 	if *resp.IsTruncated {
 		atomic.AddInt64(&f.apiCalls, 1)
 		resp, err = f.S3.ListObjects(&s3.ListObjectsInput{
-			Bucket:  s3Bucket,
+			Bucket:  aws.String(f.Bucket),
 			Prefix:  aws.String(path),
 			MaxKeys: aws.Int64(listMax),
 			Marker:  aws.String(lastKey),
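
These two hunks are halves of the v1 SDK's `Marker` pagination pattern: list, remember the last key, and re-issue the request until `IsTruncated` comes back false. A condensed sketch of the whole loop (the `listAll` helper is illustrative, not part of the repo):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAll visits every key under prefix, resuming from the last key seen
// whenever a page comes back truncated.
func listAll(svc *s3.S3, bucket, prefix string, fn func(key string)) error {
	input := &s3.ListObjectsInput{
		Bucket:  aws.String(bucket),
		Prefix:  aws.String(prefix),
		MaxKeys: aws.Int64(1000),
	}
	for {
		resp, err := svc.ListObjects(input)
		if err != nil {
			return err
		}
		var lastKey string
		for _, obj := range resp.Contents {
			lastKey = *obj.Key
			fn(lastKey)
		}
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			return nil
		}
		input.Marker = aws.String(lastKey) // resume after the last key
	}
}
```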
@@ -128,7 +116,7 @@ func (f *s3Storage) List(path string, fn walkFunc) error {
 
 	atomic.AddInt64(&f.apiCalls, 1)
 	resp, err := f.S3.ListObjects(&s3.ListObjectsInput{
-		Bucket:    s3Bucket,
+		Bucket:    aws.String(f.Bucket),
 		Prefix:    aws.String(path),
 		Delimiter: aws.String("/"),
 		MaxKeys:   aws.Int64(listMax),
@@ -186,7 +174,7 @@ func (f *s3Storage) List(path string, fn walkFunc) error {
 	if *resp.IsTruncated {
 		atomic.AddInt64(&f.apiCalls, 1)
 		resp, err = f.S3.ListObjects(&s3.ListObjectsInput{
-			Bucket:    s3Bucket,
+			Bucket:    aws.String(f.Bucket),
 			Prefix:    aws.String(path),
 			MaxKeys:   aws.Int64(listMax),
 			Delimiter: aws.String("/"),
@@ -222,7 +210,7 @@ func (f *s3Storage) Read(path string, etag string) ([]byte, error) {
 
 	atomic.AddInt64(&f.apiCalls, 1)
 	resp, err := f.S3.GetObject(&s3.GetObjectInput{
-		Bucket: s3Bucket,
+		Bucket: aws.String(f.Bucket),
 		Key:    aws.String(f.fullPath(path)),
 	})
 
@@ -247,7 +235,7 @@ func (f *s3Storage) Read(path string, etag string) ([]byte, error) {
 func (f *s3Storage) Delete(path string) error {
 	atomic.AddInt64(&f.expensiveApiCalls, 1)
 	_, err := f.S3.DeleteObject(&s3.DeleteObjectInput{
-		Bucket: s3Bucket,
+		Bucket: aws.String(f.Bucket),
 		Key:    aws.String(f.fullPath(path)),
 	})
 	return err
@@ -256,8 +244,8 @@ func (f *s3Storage) Delete(path string) error {
 func (f *s3Storage) Move(path, newPath string) error {
 	atomic.AddInt64(&f.expensiveApiCalls, 1)
 	_, err := f.S3.CopyObject(&s3.CopyObjectInput{
-		CopySource: aws.String("/" + *s3Bucket + "/" + f.fullPath(path)),
-		Bucket:     s3Bucket,
+		CopySource: aws.String("/" + f.Bucket + "/" + f.fullPath(path)),
+		Bucket:     aws.String(f.Bucket),
 		Key:        aws.String(f.backupPath(newPath)),
 	})
 	if err != nil {
@@ -270,3 +258,21 @@ func (f *s3Storage) Info() {
 	logrus.Infoln("S3 INFO: API calls/expensive:", f.apiCalls, f.expensiveApiCalls,
 		"Cache (hit/miss/error):", f.cacheHits, f.cacheMiss, f.cacheError)
 }
+
+func newS3Storage(config *distributionStorageS3) (storageObject, error) {
+	awsConfig := aws.NewConfig()
+	awsConfig.Endpoint = config.RegionEndpoint
+	awsConfig.Region = config.Region
+	awsConfig.Credentials = credentials.NewStaticCredentials(config.AccessKey, config.SecretKey, "")
+
+	sess, err := session.NewSession()
+	if err != nil {
+		return nil, err
+	}
+
+	storage := &s3Storage{
+		distributionStorageS3: config,
+		S3:                    s3.New(sess, awsConfig),
+	}
+	return storage, nil
+}
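
Since `regionendpoint` feeds `awsConfig.Endpoint`, the same constructor can in principle target S3-compatible stores; a hypothetical call with made-up values (same package and imports assumed; some stores additionally need `aws.Config.S3ForcePathStyle`, which this code does not set):

```go
// Hypothetical usage; endpoint, bucket, and credentials are placeholders.
func exampleMinioStorage() (storageObject, error) {
	return newS3Storage(&distributionStorageS3{
		AccessKey:      "minio",
		SecretKey:      "minio123",
		Bucket:         "registry",
		Region:         aws.String("us-east-1"),
		RegionEndpoint: aws.String("http://127.0.0.1:9000"),
		RootDirectory:  "/",
	})
}
```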